author     Linus Torvalds <torvalds@linux-foundation.org>   2022-06-05 09:25:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-06-05 09:25:12 -0700
commit     b2c9a83d262a8feb022e24e9f9aadb66cb10a7a8 (patch)
tree       6f3ded2dbdfd308e04bd781bc33bdb8af3a45558 /drivers/ufs
parent     2981436374177f78539b026ce5bcbab8c251818e (diff)
parent     aa2a4ded05058f134a4dee1424f829d662e00cda (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull more SCSI updates from James Bottomley:
"Mostly small bug fixes plus other trivial updates.
The major change of note is moving ufs out of scsi and a minor update
to lpfc vmid handling"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (24 commits)
scsi: qla2xxx: Remove unused 'ql_dm_tgt_ex_pct' parameter
scsi: qla2xxx: Remove setting of 'req' and 'rsp' parameters
scsi: mpi3mr: Fix kernel-doc
scsi: lpfc: Add support for ATTO Fibre Channel devices
scsi: core: Return BLK_STS_TRANSPORT for ALUA transitioning
scsi: sd_zbc: Prevent zone information memory leak
scsi: sd: Fix potential NULL pointer dereference
scsi: mpi3mr: Rework mrioc->bsg_device model to fix warnings
scsi: myrb: Fix up null pointer access on myrb_cleanup()
scsi: core: Unexport scsi_bus_type
scsi: sd: Don't call blk_cleanup_disk() in sd_probe()
scsi: ufs: ufshcd: Delete unnecessary NULL check
scsi: isci: Fix typo in comment
scsi: pmcraid: Fix typo in comment
scsi: smartpqi: Fix typo in comment
scsi: qedf: Fix typo in comment
scsi: esas2r: Fix typo in comment
scsi: storvsc: Fix typo in comment
scsi: ufs: Split the drivers/scsi/ufs directory
scsi: qla1280: Remove redundant variable
...
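The directory split called out in the message ("scsi: ufs: Split the drivers/scsi/ufs directory") is the structural change to keep in mind while reading the diff: the core driver moves to drivers/ufs/ and the shared headers move behind a public <ufs/...> include namespace, which is why the new files below consistently use #include <ufs/ufshcd.h>. As a minimal sketch of what that means for a UFS host driver's includes (the "before" form is an assumption based on the pre-split layout, where the headers sat beside the sources in drivers/scsi/ufs/):

```c
/* Sketch only, not part of this commit.
 *
 * Before the split (assumed pre-split layout):
 *
 *     #include "ufshcd.h"
 *
 * After the split, the core header is reached through the public
 * include path used throughout the new drivers/ufs/ sources:
 */
#include <ufs/ufshcd.h>
```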
Diffstat (limited to 'drivers/ufs')
43 files changed, 24259 insertions, 0 deletions
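Before the diff itself, one mechanism from the relocated sysfs code is worth a worked example: the auto_hibern8 attribute added in ufs-sysfs.c (below) stores its microsecond timeout in the UFSHCI Auto-Hibernate Idle Timer (AHIT) register format, a 10-bit timer value plus a 3-bit power-of-ten scale. Here is a standalone userspace sketch of the same round-trip; the mask, shift, and scale-factor constants are assumptions matching the UFSHCI register layout, not values copied from this diff:

```c
#include <stdio.h>
#include <stdint.h>

/* Assumed AHIT layout: bits 9:0 timer value, bits 12:10 scale, x10 per step */
#define AHIBERN8_TIMER_MASK   0x3FFu
#define AHIBERN8_SCALE_SHIFT  10
#define AHIBERN8_SCALE_FACTOR 10

/* Mirrors ufshcd_us_to_ahit(): shrink the value until it fits in 10 bits,
 * counting how many times we divided by 10. */
static uint32_t us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	for (scale = 0; timer > AHIBERN8_TIMER_MASK; ++scale)
		timer /= AHIBERN8_SCALE_FACTOR;

	return timer | (scale << AHIBERN8_SCALE_SHIFT);
}

/* Mirrors ufshcd_ahit_to_us(): multiply the timer back up by 10^scale. */
static unsigned int ahit_to_us(uint32_t ahit)
{
	unsigned int timer = ahit & AHIBERN8_TIMER_MASK;
	unsigned int scale = (ahit >> AHIBERN8_SCALE_SHIFT) & 0x7;

	for (; scale > 0; --scale)
		timer *= AHIBERN8_SCALE_FACTOR;

	return timer;
}

int main(void)
{
	/* 150000 us exceeds 10 bits, so it is stored as 150 x 10^3 */
	uint32_t ahit = us_to_ahit(150000);

	printf("ahit=0x%x -> %u us\n", (unsigned int)ahit, ahit_to_us(ahit));
	return 0;
}
```

The encoding is deliberately lossy: a value that does not fit in 10 bits keeps only its leading digits, which is why the store handler in ufs-sysfs.c rejects only values above UFSHCI_AHIBERN8_MAX and otherwise accepts the rounding.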
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig new file mode 100644 index 000000000000..90226f72c158 --- /dev/null +++ b/drivers/ufs/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# UFS subsystem configuration +# + +menuconfig SCSI_UFSHCD + tristate "Universal Flash Storage Controller" + depends on SCSI && SCSI_DMA + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select NLS + help + Enables support for UFS (Universal Flash Storage) host controllers. + A UFS host controller is an electronic component that is able to + communicate with a UFS card. UFS host controllers occur in + smartphones, laptops, digital cameras and also in cars. + The kernel module will be called ufshcd. + + To compile this driver as a module, choose M here and read + <file:Documentation/scsi/ufs.rst>. + However, do not compile this as a module if your root file system + (the one containing the directory /) is located on a UFS device. + +if SCSI_UFSHCD + +source "drivers/ufs/core/Kconfig" + +source "drivers/ufs/host/Kconfig" + +endif diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile new file mode 100644 index 000000000000..5a199ef18d4c --- /dev/null +++ b/drivers/ufs/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +# The link order is important here. ufshcd-core must initialize +# before vendor drivers. +obj-$(CONFIG_SCSI_UFSHCD) += core/ host/ diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig new file mode 100644 index 000000000000..e11978171403 --- /dev/null +++ b/drivers/ufs/core/Kconfig @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Kernel configuration file for the UFS Host Controller core. +# +# Copyright (C) 2011-2013 Samsung India Software Operations +# +# Authors: +# Santosh Yaraganavi <santosh.sy@samsung.com> +# Vinayak Holikatti <h.vinayak@samsung.com> + +config SCSI_UFS_BSG + bool "Universal Flash Storage BSG device node" + select BLK_DEV_BSGLIB + help + Universal Flash Storage (UFS) is SCSI transport specification for + accessing flash storage on digital cameras, mobile phones and + consumer electronic devices. + A UFS controller communicates with a UFS device by exchanging + UFS Protocol Information Units (UPIUs). + UPIUs can not only be used as a transport layer for the SCSI protocol + but are also used by the UFS native command set. + This transport driver supports exchanging UFS protocol information units + with a UFS device. See also the ufshcd driver, which is a SCSI driver + that supports UFS devices. + + Select this if you need a bsg device node for your UFS controller. + If unsure, say N. + +config SCSI_UFS_CRYPTO + bool "UFS Crypto Engine Support" + depends on BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in UFS. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the UFS device (if present) to perform crypto + operations on data being transferred to/from the device. + +config SCSI_UFS_HPB + bool "Support UFS Host Performance Booster" + help + The UFS HPB feature improves random read performance. It caches + L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB + read command by piggybacking physical page number for bypassing FTL (flash + translation layer)'s L2P address translation. + +config SCSI_UFS_FAULT_INJECTION + bool "UFS Fault Injection Support" + depends on FAULT_INJECTION + help + Enable fault injection support in the UFS driver. This makes it easier + to test the UFS error handler and abort handler. 
+ +config SCSI_UFS_HWMON + bool "UFS Temperature Notification" + depends on SCSI_UFSHCD=m && HWMON || HWMON=y + help + This provides support for UFS hardware monitoring. If enabled, + a hardware monitoring device will be created for the UFS device. + + If unsure, say N. diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile new file mode 100644 index 000000000000..62f38c5bf857 --- /dev/null +++ b/drivers/ufs/core/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o +ufshcd-core-y += ufshcd.o ufs-sysfs.o +ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o +ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o +ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o +ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o +ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c new file mode 100644 index 000000000000..e3baed6c70bd --- /dev/null +++ b/drivers/ufs/core/ufs-debugfs.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2020 Intel Corporation + +#include <linux/debugfs.h> + +#include "ufs-debugfs.h" +#include <ufs/ufshcd.h> +#include "ufshcd-priv.h" + +static struct dentry *ufs_debugfs_root; + +struct ufs_debugfs_attr { + const char *name; + mode_t mode; + const struct file_operations *fops; +}; + +/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */ +static inline struct ufs_hba *hba_from_file(const struct file *file) +{ + return d_inode(file->f_path.dentry->d_parent)->i_private; +} + +void __init ufs_debugfs_init(void) +{ + ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL); +} + +void ufs_debugfs_exit(void) +{ + debugfs_remove_recursive(ufs_debugfs_root); +} + +static int ufs_debugfs_stats_show(struct seq_file *s, void *data) +{ + struct ufs_hba *hba = hba_from_file(s->file); + struct ufs_event_hist *e = hba->ufs_stats.event; + +#define PRT(fmt, typ) \ + seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt) + + PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR); + PRT("Data Link Layer errors: %llu\n", DL_ERR); + PRT("Network Layer errors: %llu\n", NL_ERR); + PRT("Transport Layer errors: %llu\n", TL_ERR); + PRT("Generic DME errors: %llu\n", DME_ERR); + PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR); + PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR); + PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL); + PRT("PM Resume errors: %llu\n", RESUME_ERR); + PRT("PM Suspend errors : %llu\n", SUSPEND_ERR); + PRT("Logical Unit Resets: %llu\n", DEV_RESET); + PRT("Host Resets: %llu\n", HOST_RESET); + PRT("SCSI command aborts: %llu\n", ABORT); +#undef PRT + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats); + +static int ee_usr_mask_get(void *data, u64 *val) +{ + struct ufs_hba *hba = data; + + *val = hba->ee_usr_mask; + return 0; +} + +static int ufs_debugfs_get_user_access(struct ufs_hba *hba) +__acquires(&hba->host_sem) +{ + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + ufshcd_rpm_get_sync(hba); + return 0; +} + +static void ufs_debugfs_put_user_access(struct ufs_hba *hba) +__releases(&hba->host_sem) +{ + ufshcd_rpm_put_sync(hba); + up(&hba->host_sem); +} + +static int ee_usr_mask_set(void *data, u64 val) +{ + struct ufs_hba *hba = data; + int err; + + if (val & ~(u64)MASK_EE_STATUS) + return -EINVAL; + err =
ufs_debugfs_get_user_access(hba); + if (err) + return err; + err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS); + ufs_debugfs_put_user_access(hba); + return err; +} + +DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n"); + +void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) +{ + bool chgd = false; + u16 ee_ctrl_mask; + int err = 0; + + if (!hba->debugfs_ee_rate_limit_ms || !status) + return; + + mutex_lock(&hba->ee_ctrl_mutex); + ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status); + chgd = ee_ctrl_mask != hba->ee_ctrl_mask; + if (chgd) { + err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); + if (err) + dev_err(hba->dev, "%s: failed to write ee control %d\n", + __func__, err); + } + mutex_unlock(&hba->ee_ctrl_mutex); + + if (chgd && !err) { + unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms); + + queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay); + } +} + +static void ufs_debugfs_restart_ee(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work); + + if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) || + ufs_debugfs_get_user_access(hba)) + return; + ufshcd_write_ee_control(hba); + ufs_debugfs_put_user_access(hba); +} + +static int ufs_saved_err_show(struct seq_file *s, void *data) +{ + struct ufs_debugfs_attr *attr = s->private; + struct ufs_hba *hba = hba_from_file(s->file); + const int *p; + + if (strcmp(attr->name, "saved_err") == 0) { + p = &hba->saved_err; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + p = &hba->saved_uic_err; + } else { + return -ENOENT; + } + + seq_printf(s, "%d\n", *p); + return 0; +} + +static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ufs_debugfs_attr *attr = file->f_inode->i_private; + struct ufs_hba *hba = hba_from_file(file); + char val_str[16] = { }; + int val, ret; + + if (count > sizeof(val_str)) + return -EINVAL; + if (copy_from_user(val_str, buf, count)) + return -EFAULT; + ret = kstrtoint(val_str, 0, &val); + if (ret < 0) + return ret; + + spin_lock_irq(hba->host->host_lock); + if (strcmp(attr->name, "saved_err") == 0) { + hba->saved_err = val; + } else if (strcmp(attr->name, "saved_uic_err") == 0) { + hba->saved_uic_err = val; + } else { + ret = -ENOENT; + } + if (ret == 0) + ufshcd_schedule_eh_work(hba); + spin_unlock_irq(hba->host->host_lock); + + return ret < 0 ? 
ret : count; +} + +static int ufs_saved_err_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_saved_err_show, inode->i_private); +} + +static const struct file_operations ufs_saved_err_fops = { + .owner = THIS_MODULE, + .open = ufs_saved_err_open, + .read = seq_read, + .write = ufs_saved_err_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct ufs_debugfs_attr ufs_attrs[] = { + { "stats", 0400, &ufs_debugfs_stats_fops }, + { "saved_err", 0600, &ufs_saved_err_fops }, + { "saved_uic_err", 0600, &ufs_saved_err_fops }, + { } +}; + +void ufs_debugfs_hba_init(struct ufs_hba *hba) +{ + const struct ufs_debugfs_attr *attr; + struct dentry *root; + + /* Set default exception event rate limit period to 20ms */ + hba->debugfs_ee_rate_limit_ms = 20; + INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee); + + root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root); + if (IS_ERR_OR_NULL(root)) + return; + hba->debugfs_root = root; + d_inode(root)->i_private = hba; + for (attr = ufs_attrs; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, root, (void *)attr, + attr->fops); + debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root, + hba, &ee_usr_mask_fops); + debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root, + &hba->debugfs_ee_rate_limit_ms); +} + +void ufs_debugfs_hba_exit(struct ufs_hba *hba) +{ + debugfs_remove_recursive(hba->debugfs_root); + cancel_delayed_work_sync(&hba->debugfs_ee_work); +} diff --git a/drivers/ufs/core/ufs-debugfs.h b/drivers/ufs/core/ufs-debugfs.h new file mode 100644 index 000000000000..97548a3f90eb --- /dev/null +++ b/drivers/ufs/core/ufs-debugfs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 Intel Corporation + */ + +#ifndef __UFS_DEBUGFS_H__ +#define __UFS_DEBUGFS_H__ + +struct ufs_hba; + +#ifdef CONFIG_DEBUG_FS +void __init ufs_debugfs_init(void); +void ufs_debugfs_exit(void); +void ufs_debugfs_hba_init(struct ufs_hba *hba); +void ufs_debugfs_hba_exit(struct ufs_hba *hba); +void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status); +#else +static inline void ufs_debugfs_init(void) {} +static inline void ufs_debugfs_exit(void) {} +static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {} +static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {} +static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {} +#endif + +#endif diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c new file mode 100644 index 000000000000..7ac7c4e7ff83 --- /dev/null +++ b/drivers/ufs/core/ufs-fault-injection.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/kconfig.h> +#include <linux/types.h> +#include <linux/fault-inject.h> +#include <linux/module.h> +#include "ufs-fault-injection.h" + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp); +static int ufs_fault_set(const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops ufs_fault_ops = { + .get = ufs_fault_get, + .set = ufs_fault_set, +}; + +enum { FAULT_INJ_STR_SIZE = 80 }; + +/* + * For more details about fault injection, please refer to + * Documentation/fault-injection/fault-injection.rst. + */ +static char g_trigger_eh_str[FAULT_INJ_STR_SIZE]; +module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644); +MODULE_PARM_DESC(trigger_eh, + "Fault injection. 
trigger_eh=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr); + +static char g_timeout_str[FAULT_INJ_STR_SIZE]; +module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644); +MODULE_PARM_DESC(timeout, + "Fault injection. timeout=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_timeout_attr); + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp) +{ + const char *fault_str = kp->arg; + + return sysfs_emit(buffer, "%s\n", fault_str); +} + +static int ufs_fault_set(const char *val, const struct kernel_param *kp) +{ + struct fault_attr *attr = NULL; + + if (kp->arg == g_trigger_eh_str) + attr = &ufs_trigger_eh_attr; + else if (kp->arg == g_timeout_str) + attr = &ufs_timeout_attr; + + if (WARN_ON_ONCE(!attr)) + return -EINVAL; + + if (!setup_fault_attr(attr, (char *)val)) + return -EINVAL; + + strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE); + + return 0; +} + +bool ufs_trigger_eh(void) +{ + return should_fail(&ufs_trigger_eh_attr, 1); +} + +bool ufs_fail_completion(void) +{ + return should_fail(&ufs_timeout_attr, 1); +} diff --git a/drivers/ufs/core/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h new file mode 100644 index 000000000000..6d0cd8e10c87 --- /dev/null +++ b/drivers/ufs/core/ufs-fault-injection.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _UFS_FAULT_INJECTION_H +#define _UFS_FAULT_INJECTION_H + +#include <linux/kconfig.h> +#include <linux/types.h> + +#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION +bool ufs_trigger_eh(void); +bool ufs_fail_completion(void); +#else +static inline bool ufs_trigger_eh(void) +{ + return false; +} + +static inline bool ufs_fail_completion(void) +{ + return false; +} +#endif + +#endif /* _UFS_FAULT_INJECTION_H */ diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c new file mode 100644 index 000000000000..4c6a872b7a7c --- /dev/null +++ b/drivers/ufs/core/ufs-hwmon.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UFS hardware monitoring support + * Copyright (c) 2021, Western Digital Corporation + */ + +#include <linux/hwmon.h> +#include <linux/units.h> + +#include <ufs/ufshcd.h> +#include "ufshcd-priv.h" + +struct ufs_hwmon_data { + struct ufs_hba *hba; + u8 mask; +}; + +static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val) +{ + u32 ee_mask; + int err; + + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, + &ee_mask); + if (err) + return err; + + *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP); + + return 0; +} + +static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val) +{ + u32 value; + int err; + + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value); + if (err) + return err; + + if (value == 0) + return -ENODATA; + + *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE; + + return 0; +} + +static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, + long *val) +{ + struct ufs_hwmon_data *data = dev_get_drvdata(dev); + struct ufs_hba *hba = data->hba; + int err; + + down(&hba->host_sem); + + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + + switch (attr) { + case hwmon_temp_enable: + err = ufs_read_temp_enable(hba, data->mask, val); + + break; + case hwmon_temp_crit: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val); + + break; + case 
hwmon_temp_lcrit: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val); + + break; + case hwmon_temp_input: + err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val); + + break; + default: + err = -EOPNOTSUPP; + + break; + } + + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + + return err; +} + +static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, + long val) +{ + struct ufs_hwmon_data *data = dev_get_drvdata(dev); + struct ufs_hba *hba = data->hba; + int err; + + if (attr != hwmon_temp_enable) + return -EINVAL; + + if (val != 0 && val != 1) + return -EINVAL; + + down(&hba->host_sem); + + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + + if (val == 1) + err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0); + else + err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP); + + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + + return err; +} + +static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_enable: + return 0644; + case hwmon_temp_crit: + case hwmon_temp_lcrit: + case hwmon_temp_input: + return 0444; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *ufs_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT), + NULL +}; + +static const struct hwmon_ops ufs_hwmon_ops = { + .is_visible = ufs_hwmon_is_visible, + .read = ufs_hwmon_read, + .write = ufs_hwmon_write, +}; + +static const struct hwmon_chip_info ufs_hwmon_hba_info = { + .ops = &ufs_hwmon_ops, + .info = ufs_hwmon_info, +}; + +void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) +{ + struct device *dev = hba->dev; + struct ufs_hwmon_data *data; + struct device *hwmon; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + data->hba = hba; + data->mask = mask; + + hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL); + if (IS_ERR(hwmon)) { + dev_warn(dev, "Failed to instantiate hwmon device\n"); + kfree(data); + return; + } + + hba->hwmon_device = hwmon; +} + +void ufs_hwmon_remove(struct ufs_hba *hba) +{ + struct ufs_hwmon_data *data; + + if (!hba->hwmon_device) + return; + + data = dev_get_drvdata(hba->hwmon_device); + hwmon_device_unregister(hba->hwmon_device); + hba->hwmon_device = NULL; + kfree(data); +} + +void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) +{ + if (!hba->hwmon_device) + return; + + if (ee_mask & MASK_EE_TOO_HIGH_TEMP) + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0); + + if (ee_mask & MASK_EE_TOO_LOW_TEMP) + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0); +} diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c new file mode 100644 index 000000000000..0a088b47d557 --- /dev/null +++ b/drivers/ufs/core/ufs-sysfs.c @@ -0,0 +1,1268 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Western Digital Corporation + +#include <linux/err.h> +#include <linux/string.h> +#include <linux/bitfield.h> +#include <asm/unaligned.h> + +#include <ufs/ufs.h> +#include "ufs-sysfs.h" +#include "ufshcd-priv.h" + +static const char *ufshcd_uic_link_state_to_string( + enum uic_link_state state) +{ + switch (state) { + case UIC_LINK_OFF_STATE: return "OFF"; + case UIC_LINK_ACTIVE_STATE: return "ACTIVE"; + case 
UIC_LINK_HIBERN8_STATE: return "HIBERN8"; + case UIC_LINK_BROKEN_STATE: return "BROKEN"; + default: return "UNKNOWN"; + } +} + +static const char *ufshcd_ufs_dev_pwr_mode_to_string( + enum ufs_dev_pwr_mode state) +{ + switch (state) { + case UFS_ACTIVE_PWR_MODE: return "ACTIVE"; + case UFS_SLEEP_PWR_MODE: return "SLEEP"; + case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN"; + case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP"; + default: return "UNKNOWN"; + } +} + +static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, + bool rpm) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_dev_info *dev_info = &hba->dev_info; + unsigned long flags, value; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + if (value >= UFS_PM_LVL_MAX) + return -EINVAL; + + if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE && + (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) || + !(dev_info->wspecversion >= 0x310))) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (rpm) + hba->rpm_lvl = value; + else + hba->spm_lvl = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t rpm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->rpm_lvl); +} + +static ssize_t rpm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true); +} + +static ssize_t rpm_target_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].dev_state)); +} + +static ssize_t rpm_target_link_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->rpm_lvl].link_state)); +} + +static ssize_t spm_lvl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->spm_lvl); +} + +static ssize_t spm_lvl_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false); +} + +static ssize_t spm_target_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( + ufs_pm_lvl_states[hba->spm_lvl].dev_state)); +} + +static ssize_t spm_target_link_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( + ufs_pm_lvl_states[hba->spm_lvl].link_state)); +} + +/* Convert Auto-Hibernate Idle Timer register value to microseconds */ +static int ufshcd_ahit_to_us(u32 ahit) +{ + int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit); + int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit); + + for (; scale > 0; --scale) + timer *= UFSHCI_AHIBERN8_SCALE_FACTOR; + + return timer; +} + +/* Convert microseconds to Auto-Hibernate Idle Timer register value */ +static u32 ufshcd_us_to_ahit(unsigned int timer) +{ + unsigned int scale; + + 
for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale) + timer /= UFSHCI_AHIBERN8_SCALE_FACTOR; + + return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); +} + +static ssize_t auto_hibern8_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 ahit; + int ret; + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return -EOPNOTSUPP; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + pm_runtime_get_sync(hba->dev); + ufshcd_hold(hba, false); + ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); + ufshcd_release(hba); + pm_runtime_put_sync(hba->dev); + + ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); + +out: + up(&hba->host_sem); + return ret; +} + +static ssize_t auto_hibern8_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int timer; + int ret = 0; + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &timer)) + return -EINVAL; + + if (timer > UFSHCI_AHIBERN8_MAX) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer)); + +out: + up(&hba->host_sem); + return ret ? ret : count; +} + +static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled); +} + +static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int wb_enable; + ssize_t res; + + if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) { + /* + * If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB + * on/off will be done while clock scaling up/down. + */ + dev_warn(dev, "To control WB through wb_on is not allowed!\n"); + return -EOPNOTSUPP; + } + + if (kstrtouint(buf, 0, &wb_enable)) + return -EINVAL; + + if (wb_enable != 0 && wb_enable != 1) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + res = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + res = ufshcd_wb_toggle(hba, wb_enable); + ufshcd_rpm_put_sync(hba); +out: + up(&hba->host_sem); + return res < 0 ? 
res : count; +} + +static DEVICE_ATTR_RW(rpm_lvl); +static DEVICE_ATTR_RO(rpm_target_dev_state); +static DEVICE_ATTR_RO(rpm_target_link_state); +static DEVICE_ATTR_RW(spm_lvl); +static DEVICE_ATTR_RO(spm_target_dev_state); +static DEVICE_ATTR_RO(spm_target_link_state); +static DEVICE_ATTR_RW(auto_hibern8); +static DEVICE_ATTR_RW(wb_on); + +static struct attribute *ufs_sysfs_ufshcd_attrs[] = { + &dev_attr_rpm_lvl.attr, + &dev_attr_rpm_target_dev_state.attr, + &dev_attr_rpm_target_link_state.attr, + &dev_attr_spm_lvl.attr, + &dev_attr_spm_target_dev_state.attr, + &dev_attr_spm_target_link_state.attr, + &dev_attr_auto_hibern8.attr, + &dev_attr_wb_on.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_default_group = { + .attrs = ufs_sysfs_ufshcd_attrs, +}; + +static ssize_t monitor_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->monitor.enabled); +} + +static ssize_t monitor_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long value, flags; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + value = !!value; + spin_lock_irqsave(hba->host->host_lock, flags); + if (value == hba->monitor.enabled) + goto out_unlock; + + if (!value) { + memset(&hba->monitor, 0, sizeof(hba->monitor)); + } else { + hba->monitor.enabled = true; + hba->monitor.enabled_ts = ktime_get(); + } + +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t monitor_chunk_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size); +} + +static ssize_t monitor_chunk_size_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long value, flags; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* Only allow chunk size change when monitor is disabled */ + if (!hba->monitor.enabled) + hba->monitor.chunk_size = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static ssize_t read_total_sectors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]); +} + +static ssize_t read_total_busy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.total_busy[READ])); +} + +static ssize_t read_nr_requests_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]); +} + +static ssize_t read_req_latency_avg_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_hba_monitor *m = &hba->monitor; + + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]), + m->nr_req[READ])); +} + +static ssize_t read_req_latency_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + 
ktime_to_us(hba->monitor.lat_max[READ])); +} + +static ssize_t read_req_latency_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_min[READ])); +} + +static ssize_t read_req_latency_sum_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_sum[READ])); +} + +static ssize_t write_total_sectors_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]); +} + +static ssize_t write_total_busy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.total_busy[WRITE])); +} + +static ssize_t write_nr_requests_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]); +} + +static ssize_t write_req_latency_avg_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_hba_monitor *m = &hba->monitor; + + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]), + m->nr_req[WRITE])); +} + +static ssize_t write_req_latency_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_max[WRITE])); +} + +static ssize_t write_req_latency_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_min[WRITE])); +} + +static ssize_t write_req_latency_sum_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%llu\n", + ktime_to_us(hba->monitor.lat_sum[WRITE])); +} + +static DEVICE_ATTR_RW(monitor_enable); +static DEVICE_ATTR_RW(monitor_chunk_size); +static DEVICE_ATTR_RO(read_total_sectors); +static DEVICE_ATTR_RO(read_total_busy); +static DEVICE_ATTR_RO(read_nr_requests); +static DEVICE_ATTR_RO(read_req_latency_avg); +static DEVICE_ATTR_RO(read_req_latency_max); +static DEVICE_ATTR_RO(read_req_latency_min); +static DEVICE_ATTR_RO(read_req_latency_sum); +static DEVICE_ATTR_RO(write_total_sectors); +static DEVICE_ATTR_RO(write_total_busy); +static DEVICE_ATTR_RO(write_nr_requests); +static DEVICE_ATTR_RO(write_req_latency_avg); +static DEVICE_ATTR_RO(write_req_latency_max); +static DEVICE_ATTR_RO(write_req_latency_min); +static DEVICE_ATTR_RO(write_req_latency_sum); + +static struct attribute *ufs_sysfs_monitor_attrs[] = { + &dev_attr_monitor_enable.attr, + &dev_attr_monitor_chunk_size.attr, + &dev_attr_read_total_sectors.attr, + &dev_attr_read_total_busy.attr, + &dev_attr_read_nr_requests.attr, + &dev_attr_read_req_latency_avg.attr, + &dev_attr_read_req_latency_max.attr, + &dev_attr_read_req_latency_min.attr, + &dev_attr_read_req_latency_sum.attr, + &dev_attr_write_total_sectors.attr, + &dev_attr_write_total_busy.attr, + &dev_attr_write_nr_requests.attr, + &dev_attr_write_req_latency_avg.attr, + 
&dev_attr_write_req_latency_max.attr, + &dev_attr_write_req_latency_min.attr, + &dev_attr_write_req_latency_sum.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_monitor_group = { + .name = "monitor", + .attrs = ufs_sysfs_monitor_attrs, +}; + +static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + u8 desc_index, + u8 param_offset, + u8 *sysfs_buf, + u8 param_size) +{ + u8 desc_buf[8] = {0}; + int ret; + + if (param_size > 8) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_read_desc_param(hba, desc_id, desc_index, + param_offset, desc_buf, param_size); + ufshcd_rpm_put_sync(hba); + if (ret) { + ret = -EINVAL; + goto out; + } + + switch (param_size) { + case 1: + ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf); + break; + case 2: + ret = sysfs_emit(sysfs_buf, "0x%04X\n", + get_unaligned_be16(desc_buf)); + break; + case 4: + ret = sysfs_emit(sysfs_buf, "0x%08X\n", + get_unaligned_be32(desc_buf)); + break; + case 8: + ret = sysfs_emit(sysfs_buf, "0x%016llX\n", + get_unaligned_be64(desc_buf)); + break; + } + +out: + up(&hba->host_sem); + return ret; +} + +#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ + 0, _duname##_DESC_PARAM##_puname, buf, _size); \ +} \ +static DEVICE_ATTR_RO(_name) + +#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, DEVICE, _size) + +UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1); +UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1); +UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1); +UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1); +UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1); +UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1); +UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1); +UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1); +UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1); +UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1); +UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1); +UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1); +UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1); +UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1); +UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2); +UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2); +UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2); +UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1); +UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2); +UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1); +UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1); +UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1); +UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2); +UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1); +UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4); +UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1); +UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2); +UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1); +UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4); +UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1); +UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1); +UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4); + 
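Aside, not part of the commit: each UFS_DEVICE_DESC_PARAM() invocation above is shorthand. The UFS_DESC_PARAM macro defined earlier in ufs-sysfs.c generates one read-only sysfs show function per invocation; mechanically expanding the first one gives:

```c
/* Expansion of UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1) */
static ssize_t device_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE,
			0, DEVICE_DESC_PARAM_DEVICE_TYPE, buf, 1);
}
static DEVICE_ATTR_RO(device_type);
```

The same pattern produces the interconnect, geometry, health, and unit descriptor attributes later in this file, differing only in the descriptor IDN, parameter offset, and parameter size.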
+static struct attribute *ufs_sysfs_device_descriptor[] = { + &dev_attr_device_type.attr, + &dev_attr_device_class.attr, + &dev_attr_device_sub_class.attr, + &dev_attr_protocol.attr, + &dev_attr_number_of_luns.attr, + &dev_attr_number_of_wluns.attr, + &dev_attr_boot_enable.attr, + &dev_attr_descriptor_access_enable.attr, + &dev_attr_initial_power_mode.attr, + &dev_attr_high_priority_lun.attr, + &dev_attr_secure_removal_type.attr, + &dev_attr_support_security_lun.attr, + &dev_attr_bkops_termination_latency.attr, + &dev_attr_initial_active_icc_level.attr, + &dev_attr_specification_version.attr, + &dev_attr_manufacturing_date.attr, + &dev_attr_manufacturer_id.attr, + &dev_attr_rtt_capability.attr, + &dev_attr_rtc_update.attr, + &dev_attr_ufs_features.attr, + &dev_attr_ffu_timeout.attr, + &dev_attr_queue_depth.attr, + &dev_attr_device_version.attr, + &dev_attr_number_of_secure_wpa.attr, + &dev_attr_psa_max_data_size.attr, + &dev_attr_psa_state_timeout.attr, + &dev_attr_hpb_version.attr, + &dev_attr_hpb_control.attr, + &dev_attr_ext_feature_sup.attr, + &dev_attr_wb_presv_us_en.attr, + &dev_attr_wb_type.attr, + &dev_attr_wb_shared_alloc_units.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_device_descriptor_group = { + .name = "device_descriptor", + .attrs = ufs_sysfs_device_descriptor, +}; + +#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size) + +UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2); +UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2); + +static struct attribute *ufs_sysfs_interconnect_descriptor[] = { + &dev_attr_unipro_version.attr, + &dev_attr_mphy_version.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = { + .name = "interconnect_descriptor", + .attrs = ufs_sysfs_interconnect_descriptor, +}; + +#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size) + +UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8); +UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1); +UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4); +UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1); +UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1); +UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1); +UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1); +UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2); +UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units, + _SCM_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor, + _SCM_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units, + _NPM_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor, + _NPM_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units, + _ENM1_MAX_NUM_UNITS, 4); 
+UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor, + _ENM1_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units, + _ENM2_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor, + _ENM2_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units, + _ENM3_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor, + _ENM3_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units, + _ENM4_MAX_NUM_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor, + _ENM4_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2); +UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4); +UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1); +UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1); +UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1); +UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1); + + +static struct attribute *ufs_sysfs_geometry_descriptor[] = { + &dev_attr_raw_device_capacity.attr, + &dev_attr_max_number_of_luns.attr, + &dev_attr_segment_size.attr, + &dev_attr_allocation_unit_size.attr, + &dev_attr_min_addressable_block_size.attr, + &dev_attr_optimal_read_block_size.attr, + &dev_attr_optimal_write_block_size.attr, + &dev_attr_max_in_buffer_size.attr, + &dev_attr_max_out_buffer_size.attr, + &dev_attr_rpmb_rw_size.attr, + &dev_attr_dyn_capacity_resource_policy.attr, + &dev_attr_data_ordering.attr, + &dev_attr_max_number_of_contexts.attr, + &dev_attr_sys_data_tag_unit_size.attr, + &dev_attr_sys_data_tag_resource_size.attr, + &dev_attr_secure_removal_types.attr, + &dev_attr_memory_types.attr, + &dev_attr_sys_code_memory_max_alloc_units.attr, + &dev_attr_sys_code_memory_capacity_adjustment_factor.attr, + &dev_attr_non_persist_memory_max_alloc_units.attr, + &dev_attr_non_persist_memory_capacity_adjustment_factor.attr, + &dev_attr_enh1_memory_max_alloc_units.attr, + &dev_attr_enh1_memory_capacity_adjustment_factor.attr, + &dev_attr_enh2_memory_max_alloc_units.attr, + &dev_attr_enh2_memory_capacity_adjustment_factor.attr, + &dev_attr_enh3_memory_max_alloc_units.attr, + &dev_attr_enh3_memory_capacity_adjustment_factor.attr, + &dev_attr_enh4_memory_max_alloc_units.attr, + &dev_attr_enh4_memory_capacity_adjustment_factor.attr, + &dev_attr_hpb_region_size.attr, + &dev_attr_hpb_number_lu.attr, + &dev_attr_hpb_subregion_size.attr, + &dev_attr_hpb_max_active_regions.attr, + &dev_attr_wb_max_alloc_units.attr, + &dev_attr_wb_max_wb_luns.attr, + &dev_attr_wb_buff_cap_adj.attr, + &dev_attr_wb_sup_red_type.attr, + &dev_attr_wb_sup_wb_type.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_geometry_descriptor_group = { + .name = "geometry_descriptor", + .attrs = ufs_sysfs_geometry_descriptor, +}; + +#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \ + UFS_DESC_PARAM(_name, _uname, HEALTH, _size) + +UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1); +UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1); +UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1); + +static struct attribute *ufs_sysfs_health_descriptor[] = { + &dev_attr_eol_info.attr, + &dev_attr_life_time_estimation_a.attr, + &dev_attr_life_time_estimation_b.attr, + NULL, +}; + +static const struct 
attribute_group ufs_sysfs_health_descriptor_group = { + .name = "health_descriptor", + .attrs = ufs_sysfs_health_descriptor, +}; + +#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \ +static ssize_t _name##_index##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \ + PWR_DESC##_uname##_0 + _index * 2, buf, 2); \ +} \ +static DEVICE_ATTR_RO(_name##_index) + +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11); 
+UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14); +UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15); + +static struct attribute *ufs_sysfs_power_descriptor[] = { + &dev_attr_active_icc_levels_vcc0.attr, + &dev_attr_active_icc_levels_vcc1.attr, + &dev_attr_active_icc_levels_vcc2.attr, + &dev_attr_active_icc_levels_vcc3.attr, + &dev_attr_active_icc_levels_vcc4.attr, + &dev_attr_active_icc_levels_vcc5.attr, + &dev_attr_active_icc_levels_vcc6.attr, + &dev_attr_active_icc_levels_vcc7.attr, + &dev_attr_active_icc_levels_vcc8.attr, + &dev_attr_active_icc_levels_vcc9.attr, + &dev_attr_active_icc_levels_vcc10.attr, + &dev_attr_active_icc_levels_vcc11.attr, + &dev_attr_active_icc_levels_vcc12.attr, + &dev_attr_active_icc_levels_vcc13.attr, + &dev_attr_active_icc_levels_vcc14.attr, + &dev_attr_active_icc_levels_vcc15.attr, + &dev_attr_active_icc_levels_vccq0.attr, + &dev_attr_active_icc_levels_vccq1.attr, + &dev_attr_active_icc_levels_vccq2.attr, + &dev_attr_active_icc_levels_vccq3.attr, + &dev_attr_active_icc_levels_vccq4.attr, + &dev_attr_active_icc_levels_vccq5.attr, + &dev_attr_active_icc_levels_vccq6.attr, + &dev_attr_active_icc_levels_vccq7.attr, + &dev_attr_active_icc_levels_vccq8.attr, + &dev_attr_active_icc_levels_vccq9.attr, + &dev_attr_active_icc_levels_vccq10.attr, + &dev_attr_active_icc_levels_vccq11.attr, + &dev_attr_active_icc_levels_vccq12.attr, + &dev_attr_active_icc_levels_vccq13.attr, + &dev_attr_active_icc_levels_vccq14.attr, + &dev_attr_active_icc_levels_vccq15.attr, + &dev_attr_active_icc_levels_vccq20.attr, + &dev_attr_active_icc_levels_vccq21.attr, + &dev_attr_active_icc_levels_vccq22.attr, + &dev_attr_active_icc_levels_vccq23.attr, + &dev_attr_active_icc_levels_vccq24.attr, + &dev_attr_active_icc_levels_vccq25.attr, + &dev_attr_active_icc_levels_vccq26.attr, + &dev_attr_active_icc_levels_vccq27.attr, + &dev_attr_active_icc_levels_vccq28.attr, + &dev_attr_active_icc_levels_vccq29.attr, + &dev_attr_active_icc_levels_vccq210.attr, + &dev_attr_active_icc_levels_vccq211.attr, + &dev_attr_active_icc_levels_vccq212.attr, + &dev_attr_active_icc_levels_vccq213.attr, + &dev_attr_active_icc_levels_vccq214.attr, + &dev_attr_active_icc_levels_vccq215.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_power_descriptor_group = { + .name = "power_descriptor", + .attrs = ufs_sysfs_power_descriptor, +}; + +#define UFS_STRING_DESCRIPTOR(_name, _pname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + u8 index; \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + int ret; \ + int desc_len = QUERY_DESC_MAX_SIZE; \ + u8 *desc_buf; \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ + if (!desc_buf) { \ + up(&hba->host_sem); \ + return -ENOMEM; \ + } \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_descriptor_retry(hba, \ + UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ + 0, 0, desc_buf, &desc_len); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ + goto out; \ + ret = sysfs_emit(buf, "%s\n", desc_buf); \ +out: \ + 
ufshcd_rpm_put_sync(hba); \ + kfree(desc_buf); \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME); +UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME); +UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID); +UFS_STRING_DESCRIPTOR(serial_number, _SN); +UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV); + +static struct attribute *ufs_sysfs_string_descriptors[] = { + &dev_attr_manufacturer_name.attr, + &dev_attr_product_name.attr, + &dev_attr_oem_id.attr, + &dev_attr_serial_number.attr, + &dev_attr_product_revision.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_string_descriptors_group = { + .name = "string_descriptors", + .attrs = ufs_sysfs_string_descriptors, +}; + +static inline bool ufshcd_is_wb_flags(enum flag_idn idn) +{ + return idn >= QUERY_FLAG_IDN_WB_EN && + idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8; +} + +#define UFS_FLAG(_name, _uname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + bool flag; \ + u8 index = 0; \ + int ret; \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \ + index = ufshcd_wb_get_query_index(hba); \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \ + QUERY_FLAG_IDN##_uname, index, &flag); \ + ufshcd_rpm_put_sync(hba); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \ +out: \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_FLAG(device_init, _FDEVICEINIT); +UFS_FLAG(permanent_wpe, _PERMANENT_WPE); +UFS_FLAG(power_on_wpe, _PWR_ON_WPE); +UFS_FLAG(bkops_enable, _BKOPS_EN); +UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE); +UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL); +UFS_FLAG(busy_rtc, _BUSY_RTC); +UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE); +UFS_FLAG(wb_enable, _WB_EN); +UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN); +UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8); +UFS_FLAG(hpb_enable, _HPB_EN); + +static struct attribute *ufs_sysfs_device_flags[] = { + &dev_attr_device_init.attr, + &dev_attr_permanent_wpe.attr, + &dev_attr_power_on_wpe.attr, + &dev_attr_bkops_enable.attr, + &dev_attr_life_span_mode_enable.attr, + &dev_attr_phy_resource_removal.attr, + &dev_attr_busy_rtc.attr, + &dev_attr_disable_fw_update.attr, + &dev_attr_wb_enable.attr, + &dev_attr_wb_flush_en.attr, + &dev_attr_wb_flush_during_h8.attr, + &dev_attr_hpb_enable.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_flags_group = { + .name = "flags", + .attrs = ufs_sysfs_device_flags, +}; + +static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) +{ + return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS && + idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE; +} + +#define UFS_ATTRIBUTE(_name, _uname) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ufs_hba *hba = dev_get_drvdata(dev); \ + u32 value; \ + int ret; \ + u8 index = 0; \ + \ + down(&hba->host_sem); \ + if (!ufshcd_is_user_access_allowed(hba)) { \ + up(&hba->host_sem); \ + return -EBUSY; \ + } \ + if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \ + index = ufshcd_wb_get_query_index(hba); \ + ufshcd_rpm_get_sync(hba); \ + ret = ufshcd_query_attr(hba, 
UPIU_QUERY_OPCODE_READ_ATTR, \ + QUERY_ATTR_IDN##_uname, index, 0, &value); \ + ufshcd_rpm_put_sync(hba); \ + if (ret) { \ + ret = -EINVAL; \ + goto out; \ + } \ + ret = sysfs_emit(buf, "0x%08X\n", value); \ +out: \ + up(&hba->host_sem); \ + return ret; \ +} \ +static DEVICE_ATTR_RO(_name) + +UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN); +UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD); +UFS_ATTRIBUTE(current_power_mode, _POWER_MODE); +UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL); +UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN); +UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS); +UFS_ATTRIBUTE(purge_status, _PURGE_STATUS); +UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN); +UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT); +UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ); +UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK); +UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT); +UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL); +UFS_ATTRIBUTE(exception_event_status, _EE_STATUS); +UFS_ATTRIBUTE(ffu_status, _FFU_STATUS); +UFS_ATTRIBUTE(psa_state, _PSA_STATE); +UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE); +UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS); +UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE); +UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST); +UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE); + + +static struct attribute *ufs_sysfs_attributes[] = { + &dev_attr_boot_lun_enabled.attr, + &dev_attr_max_data_size_hpb_single_cmd.attr, + &dev_attr_current_power_mode.attr, + &dev_attr_active_icc_level.attr, + &dev_attr_ooo_data_enabled.attr, + &dev_attr_bkops_status.attr, + &dev_attr_purge_status.attr, + &dev_attr_max_data_in_size.attr, + &dev_attr_max_data_out_size.attr, + &dev_attr_reference_clock_frequency.attr, + &dev_attr_configuration_descriptor_lock.attr, + &dev_attr_max_number_of_rtt.attr, + &dev_attr_exception_event_control.attr, + &dev_attr_exception_event_status.attr, + &dev_attr_ffu_status.attr, + &dev_attr_psa_state.attr, + &dev_attr_psa_data_size.attr, + &dev_attr_wb_flush_status.attr, + &dev_attr_wb_avail_buf.attr, + &dev_attr_wb_life_time_est.attr, + &dev_attr_wb_cur_buf.attr, + NULL, +}; + +static const struct attribute_group ufs_sysfs_attributes_group = { + .name = "attributes", + .attrs = ufs_sysfs_attributes, +}; + +static const struct attribute_group *ufs_sysfs_groups[] = { + &ufs_sysfs_default_group, + &ufs_sysfs_monitor_group, + &ufs_sysfs_device_descriptor_group, + &ufs_sysfs_interconnect_descriptor_group, + &ufs_sysfs_geometry_descriptor_group, + &ufs_sysfs_health_descriptor_group, + &ufs_sysfs_power_descriptor_group, + &ufs_sysfs_string_descriptors_group, + &ufs_sysfs_flags_group, + &ufs_sysfs_attributes_group, + NULL, +}; + +#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \ +static ssize_t _pname##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufs_hba *hba = shost_priv(sdev->host); \ + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \ + _duname##_DESC_PARAM##_puname)) \ + return -EINVAL; \ + return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ + lun, _duname##_DESC_PARAM##_puname, buf, _size); \ +} \ +static DEVICE_ATTR_RO(_pname) + +#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \ + UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size) + +UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1); +UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1); 
+UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1); +UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1); +UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1); +UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1); +UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1); +UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); +UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); +UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); +UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); +UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); +UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); +UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2); +UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2); +UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2); +UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); + +static struct attribute *ufs_sysfs_unit_descriptor[] = { + &dev_attr_lu_enable.attr, + &dev_attr_boot_lun_id.attr, + &dev_attr_lun_write_protect.attr, + &dev_attr_lun_queue_depth.attr, + &dev_attr_psa_sensitive.attr, + &dev_attr_lun_memory_type.attr, + &dev_attr_data_reliability.attr, + &dev_attr_logical_block_size.attr, + &dev_attr_logical_block_count.attr, + &dev_attr_erase_block_size.attr, + &dev_attr_provisioning_type.attr, + &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_context_capabilities.attr, + &dev_attr_large_unit_granularity.attr, + &dev_attr_hpb_lu_max_active_regions.attr, + &dev_attr_hpb_pinned_region_start_offset.attr, + &dev_attr_hpb_number_pinned_regions.attr, + &dev_attr_wb_buf_alloc_units.attr, + NULL, +}; + +const struct attribute_group ufs_sysfs_unit_descriptor_group = { + .name = "unit_descriptor", + .attrs = ufs_sysfs_unit_descriptor, +}; + +static ssize_t dyn_cap_needed_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 value; + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba = shost_priv(sdev->host); + u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + ret = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value); + ufshcd_rpm_put_sync(hba); + if (ret) { + ret = -EINVAL; + goto out; + } + + ret = sysfs_emit(buf, "0x%08X\n", value); + +out: + up(&hba->host_sem); + return ret; +} +static DEVICE_ATTR_RO(dyn_cap_needed_attribute); + +static struct attribute *ufs_sysfs_lun_attributes[] = { + &dev_attr_dyn_cap_needed_attribute.attr, + NULL, +}; + +const struct attribute_group ufs_sysfs_lun_attributes_group = { + .attrs = ufs_sysfs_lun_attributes, +}; + +void ufs_sysfs_add_nodes(struct device *dev) +{ + int ret; + + ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups); + if (ret) + dev_err(dev, + "%s: sysfs groups creation failed (err = %d)\n", + __func__, ret); +} + +void ufs_sysfs_remove_nodes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups); +} diff --git a/drivers/ufs/core/ufs-sysfs.h b/drivers/ufs/core/ufs-sysfs.h new file mode 100644 index 000000000000..8d94af3b8077 --- /dev/null +++ b/drivers/ufs/core/ufs-sysfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Western Digital Corporation + */ + +#ifndef __UFS_SYSFS_H__ 
+#define __UFS_SYSFS_H__ + +#include <linux/sysfs.h> + +struct device; + +void ufs_sysfs_add_nodes(struct device *dev); +void ufs_sysfs_remove_nodes(struct device *dev); + +extern const struct attribute_group ufs_sysfs_unit_descriptor_group; +extern const struct attribute_group ufs_sysfs_lun_attributes_group; + +#endif diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c new file mode 100644 index 000000000000..b99e3f3dc4ef --- /dev/null +++ b/drivers/ufs/core/ufs_bsg.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * bsg endpoint that supports UPIUs + * + * Copyright (C) 2018 Western Digital Corporation + */ + +#include <linux/bsg-lib.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include "ufs_bsg.h" +#include <ufs/ufshcd.h> +#include "ufshcd-priv.h" + +static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len, + struct utp_upiu_query *qr) +{ + int desc_size = be16_to_cpu(qr->length); + int desc_id = qr->idn; + + if (desc_size <= 0) + return -EINVAL; + + ufshcd_map_desc_id_to_length(hba, desc_id, desc_len); + if (!*desc_len) + return -EINVAL; + + *desc_len = min_t(int, *desc_len, desc_size); + + return 0; +} + +static int ufs_bsg_verify_query_size(struct ufs_hba *hba, + unsigned int request_len, + unsigned int reply_len) +{ + int min_req_len = sizeof(struct ufs_bsg_request); + int min_rsp_len = sizeof(struct ufs_bsg_reply); + + if (min_req_len > request_len || min_rsp_len > reply_len) { + dev_err(hba->dev, "not enough space assigned\n"); + return -EINVAL; + } + + return 0; +} + +static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job, + uint8_t **desc_buff, int *desc_len, + enum query_opcode desc_op) +{ + struct ufs_bsg_request *bsg_request = job->request; + struct utp_upiu_query *qr; + u8 *descp; + + if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC && + desc_op != UPIU_QUERY_OPCODE_READ_DESC) + goto out; + + qr = &bsg_request->upiu_req.qr; + if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) { + dev_err(hba->dev, "Illegal desc size\n"); + return -EINVAL; + } + + if (*desc_len > job->request_payload.payload_len) { + dev_err(hba->dev, "Illegal desc size\n"); + return -EINVAL; + } + + descp = kzalloc(*desc_len, GFP_KERNEL); + if (!descp) + return -ENOMEM; + + if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, descp, + *desc_len); + + *desc_buff = descp; + +out: + return 0; +} + +static int ufs_bsg_request(struct bsg_job *job) +{ + struct ufs_bsg_request *bsg_request = job->request; + struct ufs_bsg_reply *bsg_reply = job->reply; + struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent)); + unsigned int req_len = job->request_len; + unsigned int reply_len = job->reply_len; + struct uic_command uc = {}; + int msgcode; + uint8_t *desc_buff = NULL; + int desc_len = 0; + enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP; + int ret; + + ret = ufs_bsg_verify_query_size(hba, req_len, reply_len); + if (ret) + goto out; + + bsg_reply->reply_payload_rcv_len = 0; + + ufshcd_rpm_get_sync(hba); + + msgcode = bsg_request->msgcode; + switch (msgcode) { + case UPIU_TRANSACTION_QUERY_REQ: + desc_op = bsg_request->upiu_req.qr.opcode; + ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, + &desc_len, desc_op); + if (ret) { + ufshcd_rpm_put_sync(hba); + goto out; + } + + fallthrough; + case UPIU_TRANSACTION_NOP_OUT: + case UPIU_TRANSACTION_TASK_REQ: + ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req, + &bsg_reply->upiu_rsp, msgcode, + 
desc_buff, &desc_len, desc_op); + if (ret) + dev_err(hba->dev, + "exe raw upiu: error code %d\n", ret); + + break; + case UPIU_TRANSACTION_UIC_CMD: + memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE); + ret = ufshcd_send_uic_cmd(hba, &uc); + if (ret) + dev_err(hba->dev, + "send uic cmd: error code %d\n", ret); + + memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE); + + break; + default: + ret = -ENOTSUPP; + dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode); + + break; + } + + ufshcd_rpm_put_sync(hba); + + if (!desc_buff) + goto out; + + if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + desc_buff, desc_len); + + kfree(desc_buff); + +out: + bsg_reply->result = ret; + job->reply_len = sizeof(struct ufs_bsg_reply); + /* complete the job here only if no error */ + if (ret == 0) + bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); + + return ret; +} + +/** + * ufs_bsg_remove - detach and remove the added ufs-bsg node + * @hba: per adapter object + * + * Should be called when unloading the driver. + */ +void ufs_bsg_remove(struct ufs_hba *hba) +{ + struct device *bsg_dev = &hba->bsg_dev; + + if (!hba->bsg_queue) + return; + + bsg_remove_queue(hba->bsg_queue); + + device_del(bsg_dev); + put_device(bsg_dev); +} + +static inline void ufs_bsg_node_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * ufs_bsg_probe - Add ufs bsg device node + * @hba: per adapter object + * + * Called during initial loading of the driver, and before scsi_scan_host. + */ +int ufs_bsg_probe(struct ufs_hba *hba) +{ + struct device *bsg_dev = &hba->bsg_dev; + struct Scsi_Host *shost = hba->host; + struct device *parent = &shost->shost_gendev; + struct request_queue *q; + int ret; + + device_initialize(bsg_dev); + + bsg_dev->parent = get_device(parent); + bsg_dev->release = ufs_bsg_node_release; + + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); + + ret = device_add(bsg_dev); + if (ret) + goto out; + + q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0); + if (IS_ERR(q)) { + ret = PTR_ERR(q); + goto out; + } + + hba->bsg_queue = q; + + return 0; + +out: + dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no); + put_device(bsg_dev); + return ret; +} diff --git a/drivers/ufs/core/ufs_bsg.h b/drivers/ufs/core/ufs_bsg.h new file mode 100644 index 000000000000..57712d2656d2 --- /dev/null +++ b/drivers/ufs/core/ufs_bsg.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Western Digital Corporation + */ +#ifndef UFS_BSG_H +#define UFS_BSG_H + +struct ufs_hba; + +#ifdef CONFIG_SCSI_UFS_BSG +void ufs_bsg_remove(struct ufs_hba *hba); +int ufs_bsg_probe(struct ufs_hba *hba); +#else +static inline void ufs_bsg_remove(struct ufs_hba *hba) {} +static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; } +#endif + +#endif /* UFS_BSG_H */ diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c new file mode 100644 index 000000000000..198360fe5e8e --- /dev/null +++ b/drivers/ufs/core/ufshcd-crypto.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +#include <ufs/ufshcd.h> +#include "ufshcd-crypto.h" + +/* Blk-crypto modes supported by UFS crypto */ +static const struct ufs_crypto_alg_entry { + enum ufs_crypto_alg ufs_alg; + enum ufs_crypto_key_size ufs_key_size; +} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = { + 
[BLK_ENCRYPTION_MODE_AES_256_XTS] = { + .ufs_alg = UFS_CRYPTO_ALG_AES_XTS, + .ufs_key_size = UFS_CRYPTO_KEY_SIZE_256, + }, +}; + +static int ufshcd_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + int i; + u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); + int err = 0; + + ufshcd_hold(hba, false); + + if (hba->vops && hba->vops->program_key) { + err = hba->vops->program_key(hba, cfg, slot); + goto out; + } + + /* Ensure that CFGE is cleared before programming the key */ + ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); + for (i = 0; i < 16; i++) { + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]), + slot_offset + i * sizeof(cfg->reg_val[0])); + } + /* Write dword 17 */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]), + slot_offset + 17 * sizeof(cfg->reg_val[0])); + /* Dword 16 must be written last */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), + slot_offset + 16 * sizeof(cfg->reg_val[0])); +out: + ufshcd_release(hba); + return err; +} + +static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); + const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; + const struct ufs_crypto_alg_entry *alg = + &ufs_crypto_algs[key->crypto_cfg.crypto_mode]; + u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512; + int i; + int cap_idx = -1; + union ufs_crypto_cfg_entry cfg = {}; + int err; + + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) { + if (ccap_array[i].algorithm_id == alg->ufs_alg && + ccap_array[i].key_size == alg->ufs_key_size && + (ccap_array[i].sdus_mask & data_unit_mask)) { + cap_idx = i; + break; + } + } + + if (WARN_ON(cap_idx < 0)) + return -EOPNOTSUPP; + + cfg.data_unit_size = data_unit_mask; + cfg.crypto_cap_idx = cap_idx; + cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE; + + if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) { + /* In XTS mode, the blk_crypto_key's size is already doubled */ + memcpy(cfg.crypto_key, key->raw, key->size/2); + memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2, + key->raw + key->size/2, key->size/2); + } else { + memcpy(cfg.crypto_key, key->raw, key->size); + } + + err = ufshcd_program_key(hba, &cfg, slot); + + memzero_explicit(&cfg, sizeof(cfg)); + return err; +} + +static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) +{ + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. + */ + union ufs_crypto_cfg_entry cfg = {}; + + return ufshcd_program_key(hba, &cfg, slot); +} + +static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); + + return ufshcd_clear_keyslot(hba, slot); +} + +bool ufshcd_crypto_enable(struct ufs_hba *hba) +{ + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return false; + + /* Reset might clear all keys, so reprogram all the keys. 
*/ + blk_crypto_reprogram_all_keys(&hba->crypto_profile); + return true; +} + +static const struct blk_crypto_ll_ops ufshcd_crypto_ops = { + .keyslot_program = ufshcd_crypto_keyslot_program, + .keyslot_evict = ufshcd_crypto_keyslot_evict, +}; + +static enum blk_crypto_mode_num +ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) { + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id && + ufs_crypto_algs[i].ufs_key_size == cap.key_size) { + return i; + } + } + return BLK_ENCRYPTION_MODE_INVALID; +} + +/** + * ufshcd_hba_init_crypto_capabilities - Read crypto capabilities, init crypto + * fields in hba + * @hba: Per adapter instance + * + * Return: 0 if crypto was initialized or is not supported, else a -errno value. + */ +int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) +{ + int cap_idx; + int err = 0; + enum blk_crypto_mode_num blk_mode_num; + + /* + * Don't use crypto if either the hardware doesn't advertise the + * standard crypto capability bit *or* if the vendor specific driver + * hasn't advertised that crypto is supported. + */ + if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) || + !(hba->caps & UFSHCD_CAP_CRYPTO)) + goto out; + + hba->crypto_capabilities.reg_val = + cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + hba->crypto_cfg_register = + (u32)hba->crypto_capabilities.config_array_ptr * 0x100; + hba->crypto_cap_array = + devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap, + sizeof(hba->crypto_cap_array[0]), GFP_KERNEL); + if (!hba->crypto_cap_array) { + err = -ENOMEM; + goto out; + } + + /* The actual number of configurations supported is (CFGC+1) */ + err = devm_blk_crypto_profile_init( + hba->dev, &hba->crypto_profile, + hba->crypto_capabilities.config_count + 1); + if (err) + goto out; + + hba->crypto_profile.ll_ops = ufshcd_crypto_ops; + /* UFS only supports 8 bytes for any DUN */ + hba->crypto_profile.max_dun_bytes_supported = 8; + hba->crypto_profile.dev = hba->dev; + + /* + * Cache all the UFS crypto capabilities and advertise the supported + * crypto modes and data unit sizes to the block layer. 
+ */ + for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; + cap_idx++) { + hba->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = ufshcd_find_blk_crypto_mode( + hba->crypto_cap_array[cap_idx]); + if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID) + hba->crypto_profile.modes_supported[blk_mode_num] |= + hba->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + return 0; + +out: + /* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */ + hba->caps &= ~UFSHCD_CAP_CRYPTO; + return err; +} + +/** + * ufshcd_init_crypto - Initialize crypto hardware + * @hba: Per adapter instance + */ +void ufshcd_init_crypto(struct ufs_hba *hba) +{ + int slot; + + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return; + + /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ + for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) + ufshcd_clear_keyslot(hba, slot); +} + +void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q) +{ + if (hba->caps & UFSHCD_CAP_CRYPTO) + blk_crypto_register(&hba->crypto_profile, q); +} diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h new file mode 100644 index 000000000000..504cc841540b --- /dev/null +++ b/drivers/ufs/core/ufshcd-crypto.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef _UFSHCD_CRYPTO_H +#define _UFSHCD_CRYPTO_H + +#include <scsi/scsi_cmnd.h> +#include <ufs/ufshcd.h> +#include "ufshcd-priv.h" +#include <ufs/ufshci.h> + +#ifdef CONFIG_SCSI_UFS_CRYPTO + +static inline void ufshcd_prepare_lrbp_crypto(struct request *rq, + struct ufshcd_lrb *lrbp) +{ + if (!rq || !rq->crypt_keyslot) { + lrbp->crypto_key_slot = -1; + return; + } + + lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot); + lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0]; +} + +static inline void +ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0, + u32 *dword_1, u32 *dword_3) +{ + if (lrbp->crypto_key_slot >= 0) { + *dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD; + *dword_0 |= lrbp->crypto_key_slot; + *dword_1 = lower_32_bits(lrbp->data_unit_num); + *dword_3 = upper_32_bits(lrbp->data_unit_num); + } +} + +bool ufshcd_crypto_enable(struct ufs_hba *hba); + +int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); + +void ufshcd_init_crypto(struct ufs_hba *hba); + +void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q); + +#else /* CONFIG_SCSI_UFS_CRYPTO */ + +static inline void ufshcd_prepare_lrbp_crypto(struct request *rq, + struct ufshcd_lrb *lrbp) { } + +static inline void +ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0, + u32 *dword_1, u32 *dword_3) { } + +static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) +{ + return false; +} + +static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) +{ + return 0; +} + +static inline void ufshcd_init_crypto(struct ufs_hba *hba) { } + +static inline void ufshcd_crypto_register(struct ufs_hba *hba, + struct request_queue *q) { } + +#endif /* CONFIG_SCSI_UFS_CRYPTO */ + +#endif /* _UFSHCD_CRYPTO_H */ diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h new file mode 100644 index 000000000000..ffb01fc6de75 --- /dev/null +++ b/drivers/ufs/core/ufshcd-priv.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _UFSHCD_PRIV_H_ +#define _UFSHCD_PRIV_H_ + +#include <linux/pm_runtime.h> 
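One detail of the capability scan in ufshcd_hba_init_crypto_capabilities() above is worth spelling out: sdus_mask is a bitmask of supported data unit sizes in 512-byte granularity (which is also why ufshcd_crypto_keyslot_program() divides data_unit_size by 512 before testing it against the mask), while blk-crypto's modes_supported[] wants a byte-granular bitmask in which the bit for a power-of-two size S is S itself. A single multiply converts between the two. A minimal sketch, with a helper name of our own choosing rather than anything from the patch:

/*
 * Illustrative only: bit i set in a UFS SDUS mask means a data unit
 * size of (512 << i) bytes is supported. Scaling the whole mask by
 * 512 yields the byte-granular bitmask that blk-crypto consumes,
 * e.g. sdus_mask == 0x9 (512 and 4096 bytes) becomes 0x1200.
 */
static inline unsigned long ufs_sdus_to_blk_mask(u8 sdus_mask)
{
        return (unsigned long)sdus_mask * 512;
}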
+#include <ufs/ufshcd.h> + +static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba) +{ + return !hba->shutting_down; +} + +void ufshcd_schedule_eh_work(struct ufs_hba *hba); + +static inline bool ufshcd_keep_autobkops_enabled_except_suspend( + struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND; +} + +static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba) +{ + if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED) + return hba->dev_info.wb_dedicated_lu; + return 0; +} + +#ifdef CONFIG_SCSI_UFS_HWMON +void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask); +void ufs_hwmon_remove(struct ufs_hba *hba); +void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask); +#else +static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {} +static inline void ufs_hwmon_remove(struct ufs_hba *hba) {} +static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {} +#endif + +int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, + u8 *param_read_buf, + u8 param_size); +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val); +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, u8 index, bool *flag_res); +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii); + +int ufshcd_hold(struct ufs_hba *hba, bool async); +void ufshcd_release(struct ufs_hba *hba); + +void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_length); + +int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); + +int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, + struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, + int msgcode, + u8 *desc_buff, int *buff_len, + enum query_opcode desc_op); + +int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); + +/* Wrapper functions for safely calling variant operations */ +static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) +{ + if (hba->vops) + return hba->vops->name; + return ""; +} + +static inline void ufshcd_vops_exit(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->exit) + return hba->vops->exit(hba); +} + +static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->get_ufs_hci_version) + return hba->vops->get_ufs_hci_version(hba); + + return ufshcd_readl(hba, REG_UFS_VERSION); +} + +static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, + bool up, enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->clk_scale_notify) + return hba->vops->clk_scale_notify(hba, up, status); + return 0; +} + +static inline void ufshcd_vops_event_notify(struct ufs_hba *hba, + enum ufs_event_type evt, + void *data) +{ + if (hba->vops && hba->vops->event_notify) + hba->vops->event_notify(hba, evt, data); +} + +static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->setup_clocks) + return hba->vops->setup_clocks(hba, on, status); + return 0; +} + +static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba, + bool status) +{ + 
if (hba->vops && hba->vops->hce_enable_notify) + return hba->vops->hce_enable_notify(hba, status); + + return 0; +} +static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->vops && hba->vops->link_startup_notify) + return hba->vops->link_startup_notify(hba, status); + + return 0; +} + +static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (hba->vops && hba->vops->pwr_change_notify) + return hba->vops->pwr_change_notify(hba, status, + dev_max_params, dev_req_params); + + return -ENOTSUPP; +} + +static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, + int tag, u8 tm_function) +{ + if (hba->vops && hba->vops->setup_task_mgmt) + return hba->vops->setup_task_mgmt(hba, tag, tm_function); +} + +static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->hibern8_notify) + return hba->vops->hibern8_notify(hba, cmd, status); +} + +static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->apply_dev_quirks) + return hba->vops->apply_dev_quirks(hba); + return 0; +} + +static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->fixup_dev_quirks) + hba->vops->fixup_dev_quirks(hba); +} + +static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op, + enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->suspend) + return hba->vops->suspend(hba, op, status); + + return 0; +} + +static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op) +{ + if (hba->vops && hba->vops->resume) + return hba->vops->resume(hba, op); + + return 0; +} + +static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->dbg_register_dump) + hba->vops->dbg_register_dump(hba); +} + +static inline int ufshcd_vops_device_reset(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->device_reset) + return hba->vops->device_reset(hba); + + return -EOPNOTSUPP; +} + +static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, + struct devfreq_dev_profile *p, + struct devfreq_simple_ondemand_data *data) +{ + if (hba->vops && hba->vops->config_scaling_param) + hba->vops->config_scaling_param(hba, p, data); +} + +extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; + +/** + * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN + * @scsi_lun: scsi LUN id + * + * Returns UPIU LUN id + */ +static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) +{ + if (scsi_is_wlun(scsi_lun)) + return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) + | UFS_UPIU_WLUN_ID; + else + return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; +} + +int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask); +int ufshcd_write_ee_control(struct ufs_hba *hba); +int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask, + u16 set, u16 clr); + +static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba, + u16 set, u16 clr) +{ + return ufshcd_update_ee_control(hba, &hba->ee_drv_mask, + &hba->ee_usr_mask, set, clr); +} + +static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba, + u16 set, u16 clr) +{ + return ufshcd_update_ee_control(hba, &hba->ee_usr_mask, + &hba->ee_drv_mask, set, clr); +} + +static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) +{ + 
return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev); +} + +static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) +{ + return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev); +} + +static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba) +{ + pm_runtime_get_noresume(&hba->ufs_device_wlun->sdev_gendev); +} + +static inline int ufshcd_rpm_resume(struct ufs_hba *hba) +{ + return pm_runtime_resume(&hba->ufs_device_wlun->sdev_gendev); +} + +static inline int ufshcd_rpm_put(struct ufs_hba *hba) +{ + return pm_runtime_put(&hba->ufs_device_wlun->sdev_gendev); +} + +/** + * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor + * @dev_info: pointer of instance of struct ufs_dev_info + * @lun: LU number to check + * @return: true if the lun has a matching unit descriptor, false otherwise + */ +static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, + u8 lun, u8 param_offset) +{ + if (!dev_info || !dev_info->max_lu_supported) { + pr_err("Max General LU supported by UFS isn't initialized\n"); + return false; + } + /* WB is available only for the logical unit from 0 to 7 */ + if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS) + return lun < UFS_UPIU_MAX_WB_LUN_ID; + return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); +} + +#endif /* _UFSHCD_PRIV_H_ */ diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c new file mode 100644 index 000000000000..01fb4bad86be --- /dev/null +++ b/drivers/ufs/core/ufshcd.c @@ -0,0 +1,9941 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Universal Flash Storage Host controller driver Core + * Copyright (C) 2011-2013 Samsung India Software Operations + * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
+ * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + */ + +#include <linux/async.h> +#include <linux/devfreq.h> +#include <linux/nls.h> +#include <linux/of.h> +#include <linux/bitfield.h> +#include <linux/blk-pm.h> +#include <linux/blkdev.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_driver.h> +#include <scsi/scsi_eh.h> +#include "ufshcd-priv.h" +#include <ufs/ufs_quirks.h> +#include <ufs/unipro.h> +#include "ufs-sysfs.h" +#include "ufs-debugfs.h" +#include "ufs-fault-injection.h" +#include "ufs_bsg.h" +#include "ufshcd-crypto.h" +#include "ufshpb.h" +#include <asm/unaligned.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/ufs.h> + +#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ + UTP_TASK_REQ_COMPL |\ + UFSHCD_ERROR_MASK) +/* UIC command timeout, unit: ms */ +#define UIC_CMD_TIMEOUT 500 + +/* NOP OUT retries waiting for NOP IN response */ +#define NOP_OUT_RETRIES 10 +/* Timeout after 50 msecs if NOP OUT hangs without response */ +#define NOP_OUT_TIMEOUT 50 /* msecs */ + +/* Query request retries */ +#define QUERY_REQ_RETRIES 3 +/* Query request timeout */ +#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ + +/* Task management command timeout */ +#define TM_CMD_TIMEOUT 100 /* msecs */ + +/* maximum number of retries for a general UIC command */ +#define UFS_UIC_COMMAND_RETRIES 3 + +/* maximum number of link-startup retries */ +#define DME_LINKSTARTUP_RETRIES 3 + +/* Maximum retries for Hibern8 enter */ +#define UIC_HIBERN8_ENTER_RETRIES 3 + +/* maximum number of reset retries before giving up */ +#define MAX_HOST_RESET_RETRIES 5 + +/* Maximum number of error handler retries before giving up */ +#define MAX_ERR_HANDLER_RETRIES 5 + +/* Expose the flag value from utp_upiu_query.value */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + +/* Interrupt aggregation default timeout, unit: 40us */ +#define INT_AGGR_DEF_TO 0x02 + +/* default delay of autosuspend: 2000 ms */ +#define RPM_AUTOSUSPEND_DELAY_MS 2000 + +/* Default delay of RPM device flush delayed work */ +#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000 + +/* Default value of wait time before gating device ref clock */ +#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */ + +/* Polling time to wait for fDeviceInit */ +#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ + +#define ufshcd_toggle_vreg(_dev, _vreg, _on) \ + ({ \ + int _ret; \ + if (_on) \ + _ret = ufshcd_enable_vreg(_dev, _vreg); \ + else \ + _ret = ufshcd_disable_vreg(_dev, _vreg); \ + _ret; \ + }) + +#define ufshcd_hex_dump(prefix_str, buf, len) do { \ + size_t __len = (len); \ + print_hex_dump(KERN_ERR, prefix_str, \ + __len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ + 16, 4, buf, __len, false); \ +} while (0) + +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix) +{ + u32 *regs; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ + return -EINVAL; + + regs = kzalloc(len, GFP_ATOMIC); + if (!regs) + return -ENOMEM; + + for (pos = 0; pos < len; pos += 4) { + if (offset == 0 && + pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && + pos <= REG_UIC_ERROR_CODE_DME) + continue; + regs[pos / 4] = ufshcd_readl(hba, offset + pos); + } + + ufshcd_hex_dump(prefix, regs, len); + kfree(regs); + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_dump_regs); + +enum { + UFSHCD_MAX_CHANNEL = 0, + UFSHCD_MAX_ID = 1, + UFSHCD_NUM_RESERVED = 1, + UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, + UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, +}; + +static const char *const ufshcd_state_name[] = { + [UFSHCD_STATE_RESET] = "reset", + [UFSHCD_STATE_OPERATIONAL] = "operational", + [UFSHCD_STATE_ERROR] = "error", + [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal", + [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal", +}; + +/* UFSHCD error handling flags */ +enum { + UFSHCD_EH_IN_PROGRESS = (1 << 0), +}; + +/* UFSHCD UIC layer error flags */ +enum { + UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ + UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */ +}; + +#define ufshcd_set_eh_in_progress(h) \ + ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS) +#define ufshcd_eh_in_progress(h) \ + ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS) +#define ufshcd_clear_eh_in_progress(h) \ + ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) + +struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { + [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, + /* + * For DeepSleep, the link is first put in hibern8 and then off. + * Leaving the link in hibern8 is not supported. 
+ */ + [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE}, +}; + +static inline enum ufs_dev_pwr_mode +ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].dev_state; +} + +static inline enum uic_link_state +ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].link_state; +} + +static inline enum ufs_pm_level +ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, + enum uic_link_state link_state) +{ + enum ufs_pm_level lvl; + + for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) { + if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) && + (ufs_pm_lvl_states[lvl].link_state == link_state)) + return lvl; + } + + /* if no match found, return the level 0 */ + return UFS_PM_LVL_0; +} + +static const struct ufs_dev_quirk ufs_fixups[] = { + /* UFS cards deviations table */ + { .wmanufacturerid = UFS_VENDOR_MICRON, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ }, + { .wmanufacturerid = UFS_VENDOR_SAMSUNG, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = "hB8aL1" /*H28U62301AMR*/, + .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = "THGLF2G9C8KBADG", + .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, + { .wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = "THGLF2G9D8KBADG", + .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE }, + {} +}; + +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); +static void ufshcd_async_scan(void *data, async_cookie_t cookie); +static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); +static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); +static void ufshcd_resume_clkscaling(struct ufs_hba *hba); +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); +static irqreturn_t ufshcd_intr(int irq, void *__hba); +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg); +static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); +static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set); +static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable); +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba); + +static inline void ufshcd_enable_irq(struct ufs_hba *hba) +{ + if 
(!hba->is_irq_enabled) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } +} + +static inline void ufshcd_disable_irq(struct ufs_hba *hba) +{ + if (hba->is_irq_enabled) { + disable_irq(hba->irq); + hba->is_irq_enabled = false; + } +} + +static inline void ufshcd_wb_config(struct ufs_hba *hba) +{ + if (!ufshcd_is_wb_allowed(hba)) + return; + + ufshcd_wb_toggle(hba, true); + + ufshcd_wb_toggle_flush_during_h8(hba, true); + if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) + ufshcd_wb_toggle_flush(hba, true); +} + +static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) +{ + if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) + scsi_unblock_requests(hba->host); +} + +static void ufshcd_scsi_block_requests(struct ufs_hba *hba) +{ + if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) + scsi_block_requests(hba->host); +} + +static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; + struct utp_upiu_header *header; + + if (!trace_ufshcd_upiu_enabled()) + return; + + if (str_t == UFS_CMD_SEND) + header = &rq->header; + else + header = &hba->lrb[tag].ucd_rsp_ptr->header; + + trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, + UFS_TSF_CDB); +} + +static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, + enum ufs_trace_str_t str_t, + struct utp_upiu_req *rq_rsp) +{ + if (!trace_ufshcd_upiu_enabled()) + return; + + trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, + &rq_rsp->qr, UFS_TSF_OSF); +} + +static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; + + if (!trace_ufshcd_upiu_enabled()) + return; + + if (str_t == UFS_TM_SEND) + trace_ufshcd_upiu(dev_name(hba->dev), str_t, + &descp->upiu_req.req_header, + &descp->upiu_req.input_param1, + UFS_TSF_TM_INPUT); + else + trace_ufshcd_upiu(dev_name(hba->dev), str_t, + &descp->upiu_rsp.rsp_header, + &descp->upiu_rsp.output_param1, + UFS_TSF_TM_OUTPUT); +} + +static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, + struct uic_command *ucmd, + enum ufs_trace_str_t str_t) +{ + u32 cmd; + + if (!trace_ufshcd_uic_command_enabled()) + return; + + if (str_t == UFS_CMD_SEND) + cmd = ucmd->command; + else + cmd = ufshcd_readl(hba, REG_UIC_COMMAND); + + trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); +} + +static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, + enum ufs_trace_str_t str_t) +{ + u64 lba = 0; + u8 opcode = 0, group_id = 0; + u32 intr, doorbell; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; + struct request *rq = scsi_cmd_to_rq(cmd); + int transfer_len = -1; + + if (!cmd) + return; + + /* trace UPIU also */ + ufshcd_add_cmd_upiu_trace(hba, tag, str_t); + if (!trace_ufshcd_command_enabled()) + return; + + opcode = cmd->cmnd[0]; + + if (opcode == READ_10 || opcode == WRITE_10) { + /* + * Currently we only fully trace read(10) and write(10) commands + */ + transfer_len = + be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len); + lba = scsi_get_lba(cmd); + if (opcode == WRITE_10) + group_id = lrbp->cmd->cmnd[6]; + } else if (opcode == UNMAP) { + /* + * The number of Bytes to be unmapped beginning with the lba. 
+ */ + transfer_len = blk_rq_bytes(rq); + lba = scsi_get_lba(cmd); + } + + intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + trace_ufshcd_command(dev_name(hba->dev), str_t, tag, + doorbell, transfer_len, intr, lba, opcode, group_id); +} + +static void ufshcd_print_clk_freqs(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (list_empty(head)) + return; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && + clki->max_freq) + dev_err(hba->dev, "clk: %s, rate: %u\n", + clki->name, clki->curr_freq); + } +} + +static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, + char *err_name) +{ + int i; + bool found = false; + struct ufs_event_hist *e; + + if (id >= UFS_EVT_CNT) + return; + + e = &hba->ufs_stats.event[id]; + + for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) { + int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH; + + if (e->tstamp[p] == 0) + continue; + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, + e->val[p], ktime_to_us(e->tstamp[p])); + found = true; + } + + if (!found) + dev_err(hba->dev, "No record of %s\n", err_name); + else + dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); +} + +static void ufshcd_print_evt_hist(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + + ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); + ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); + ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); + ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); + ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); + ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, + "auto_hibern8_err"); + ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); + ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, + "link_startup_fail"); + ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); + ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, + "suspend_fail"); + ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); + ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); + ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); + + ufshcd_vops_dbg_register_dump(hba); +} + +static +void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) +{ + struct ufshcd_lrb *lrbp; + int prdt_length; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + + dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", + tag, ktime_to_us(lrbp->issue_time_stamp)); + dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", + tag, ktime_to_us(lrbp->compl_time_stamp)); + dev_err(hba->dev, + "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", + tag, (u64)lrbp->utrd_dma_addr); + + ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, + sizeof(struct utp_transfer_req_desc)); + dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_req_dma_addr); + ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, + sizeof(struct utp_upiu_req)); + dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, + (u64)lrbp->ucd_rsp_dma_addr); + ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, + sizeof(struct utp_upiu_rsp)); + + prdt_length = le16_to_cpu( + lrbp->utr_descriptor_ptr->prd_table_length); + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + prdt_length /= sizeof(struct ufshcd_sg_entry); + + dev_err(hba->dev, + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", + tag, prdt_length, + (u64)lrbp->ucd_prdt_dma_addr); + + if (pr_prdt) + 
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, + sizeof(struct ufshcd_sg_entry) * prdt_length); + } +} + +static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) +{ + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutmrs) { + struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; + + dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); + ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp)); + } +} + +static void ufshcd_print_host_state(struct ufs_hba *hba) +{ + struct scsi_device *sdev_ufs = hba->ufs_device_wlun; + + dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", + hba->saved_err, hba->saved_uic_err); + dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", + hba->curr_dev_pwr_mode, hba->uic_link_state); + dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", + hba->pm_op_in_progress, hba->is_sys_suspended); + dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", + hba->auto_bkops_enabled, hba->host->host_self_blocked); + dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); + dev_err(hba->dev, + "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", + ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), + hba->ufs_stats.hibern8_exit_cnt); + dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", + ktime_to_us(hba->ufs_stats.last_intr_ts), + hba->ufs_stats.last_intr_status); + dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", + hba->eh_flags, hba->req_abort_count); + dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", + hba->ufs_version, hba->capabilities, hba->caps); + dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, + hba->dev_quirks); + if (sdev_ufs) + dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", + sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev); + + ufshcd_print_clk_freqs(hba); +} + +/** + * ufshcd_print_pwr_info - print power params as saved in hba + * power info + * @hba: per-adapter instance + */ +static void ufshcd_print_pwr_info(struct ufs_hba *hba) +{ + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW_MODE", + "INVALID MODE", + "FASTAUTO_MODE", + "SLOWAUTO_MODE", + "INVALID MODE", + }; + + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by user space + * causing runtime resume, causing more messages and so on. 
+ */ + dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", + __func__, + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate); +} + +static void ufshcd_device_reset(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_vops_device_reset(hba); + + if (!err) { + ufshcd_set_ufs_dev_active(hba); + if (ufshcd_is_wb_allowed(hba)) { + hba->dev_info.wb_enabled = false; + hba->dev_info.wb_buf_flush_enabled = false; + } + } + if (err != -EOPNOTSUPP) + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); +} + +void ufshcd_delay_us(unsigned long us, unsigned long tolerance) +{ + if (!us) + return; + + if (us < 10) + udelay(us); + else + usleep_range(us, us + tolerance); +} +EXPORT_SYMBOL_GPL(ufshcd_delay_us); + +/** + * ufshcd_wait_for_register - wait for register value to change + * @hba: per-adapter interface + * @reg: mmio register offset + * @mask: mask to apply to the read register value + * @val: value to wait for + * @interval_us: polling interval in microseconds + * @timeout_ms: timeout in milliseconds + * + * Return: + * -ETIMEDOUT on error, zero on success. + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms) +{ + int err = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + usleep_range(interval_us, interval_us + 50); + if (time_after(jiffies, timeout)) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + +/** + * ufshcd_get_intr_mask - Get the interrupt bit mask + * @hba: Pointer to adapter instance + * + * Returns interrupt bit mask per version + */ +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) +{ + if (hba->ufs_version == ufshci_version(1, 0)) + return INTERRUPT_MASK_ALL_VER_10; + if (hba->ufs_version <= ufshci_version(2, 0)) + return INTERRUPT_MASK_ALL_VER_11; + + return INTERRUPT_MASK_ALL_VER_21; +} + +/** + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA + * @hba: Pointer to adapter instance + * + * Returns UFSHCI version supported by the controller + */ +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) +{ + u32 ufshci_ver; + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) + ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); + else + ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); + + /* + * UFSHCI v1.x uses a different version scheme, in order + * to allow the use of comparisons with the ufshci_version + * function, we convert it to the same scheme as ufs 2.0+. 
+ */
+        if (ufshci_ver & 0x00010000)
+                return ufshci_version(1, ufshci_ver & 0x00000100);
+
+        return ufshci_ver;
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ * the host controller
+ * @hba: pointer to adapter instance
+ *
+ * Returns true if a device is present, false if no device is detected
+ */
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
+{
+        return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from the UTRD
+ * Returns the OCS field in the UTRD
+ */
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in the UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+        else
+                ufshcd_writel(hba, ~(1 << pos),
+                              REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_utmrl_clear - Clear a bit in the UTMRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
+{
+        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
+                ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+        else
+                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns 0 on success and a positive value on failure
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non-zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+               MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument 3
+ * Returns 0 on success, non-zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */ +static inline unsigned int +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_UPIU_DATA_SEG_LEN; +} + +/** + * ufshcd_is_exception_event - Check if the device raised an exception event + * @ucd_rsp_ptr: pointer to response UPIU + * + * The function checks if the device raised an exception event indicated in + * the Device Information field of response UPIU. + * + * Returns true if exception is raised, false otherwise. + */ +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_EXCEPTION_EVENT; +} + +/** + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. + * @hba: per adapter instance + */ +static inline void +ufshcd_reset_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, INT_AGGR_ENABLE | + INT_AGGR_COUNTER_AND_TIMER_RESET, + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_config_intr_aggr - Configure interrupt aggregation values. + * @hba: per adapter instance + * @cnt: Interrupt aggregation counter threshold + * @tmout: Interrupt aggregation timeout value + */ +static inline void +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) +{ + ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | + INT_AGGR_COUNTER_THLD_VAL(cnt) | + INT_AGGR_TIMEOUT_VAL(tmout), + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_disable_intr_aggr - Disables interrupt aggregation. + * @hba: per adapter instance + */ +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_enable_run_stop_reg - Enable run-stop registers, + * When run-stop registers are set to 1, it indicates the + * host controller that it can process the requests + * @hba: per adapter instance + */ +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) +{ + ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TASK_REQ_LIST_RUN_STOP); + ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); +} + +/** + * ufshcd_hba_start - Start controller initialization sequence + * @hba: per adapter instance + */ +static inline void ufshcd_hba_start(struct ufs_hba *hba) +{ + u32 val = CONTROLLER_ENABLE; + + if (ufshcd_crypto_enable(hba)) + val |= CRYPTO_GENERAL_ENABLE; + + ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); +} + +/** + * ufshcd_is_hba_active - Get controller state + * @hba: per adapter instance + * + * Returns true if and only if the controller is active. + */ +static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE; +} + +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) +{ + /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ + if (hba->ufs_version <= ufshci_version(1, 1)) + return UFS_UNIPRO_VER_1_41; + else + return UFS_UNIPRO_VER_1_6; +} +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); + +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) +{ + /* + * If both host and device support UniPro ver1.6 or later, PA layer + * parameters tuning happens during link startup itself. + * + * We can manually tune PA layer parameters if either host or device + * doesn't support UniPro ver 1.6 or later. But to keep manual tuning + * logic simple, we will only do manual tuning if local unipro version + * doesn't support ver1.6 or later. 
+	 */
+	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
+}
+
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set the maximum possible frequency, otherwise set
+ *	the minimum frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->max_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->max_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled up", clki->name,
+						clki->curr_freq,
+						clki->max_freq);
+
+				clki->curr_freq = clki->max_freq;
+
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->min_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->min_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled down", clki->name,
+						clki->curr_freq,
+						clki->min_freq);
+				clki->curr_freq = clki->min_freq;
+			}
+		}
+		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_set_clk_freq(hba, scale_up);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	if (ret)
+		ufshcd_set_clk_freq(hba, !scale_up);
+
+out:
+	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+			(scale_up ? "up" : "down"),
+			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+	return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+					       bool scale_up)
+{
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (list_empty(head))
+		return false;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+				return true;
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Determine the number of pending commands by counting the bits in the SCSI
+ * device budget maps. This approach has been selected because a bit is set in
+ * the budget map before scsi_host_queue_ready() checks the host_self_blocked
+ * flag. The host_self_blocked flag can be modified by calling
+ * scsi_block_requests() or scsi_unblock_requests().
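+ *
+ * (Editor's note, not in the original comment: ufshcd_wait_for_doorbell_clr()
+ * below pairs this count with the task management doorbell and treats the
+ * controller as quiesced only when both
+ *
+ *	ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL)
+ *	ufshcd_pending_cmds(hba)
+ *
+ * evaluate to zero.)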
+ */ +static u32 ufshcd_pending_cmds(struct ufs_hba *hba) +{ + struct scsi_device *sdev; + u32 pending = 0; + + lockdep_assert_held(hba->host->host_lock); + __shost_for_each_device(sdev, hba->host) + pending += sbitmap_weight(&sdev->budget_map); + + return pending; +} + +static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, + u64 wait_timeout_us) +{ + unsigned long flags; + int ret = 0; + u32 tm_doorbell; + u32 tr_pending; + bool timeout = false, do_last_check = false; + ktime_t start; + + ufshcd_hold(hba, false); + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * Wait for all the outstanding tasks/transfer requests. + * Verify by checking the doorbell registers are clear. + */ + start = ktime_get(); + do { + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { + ret = -EBUSY; + goto out; + } + + tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); + tr_pending = ufshcd_pending_cmds(hba); + if (!tm_doorbell && !tr_pending) { + timeout = false; + break; + } else if (do_last_check) { + break; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + schedule(); + if (ktime_to_us(ktime_sub(ktime_get(), start)) > + wait_timeout_us) { + timeout = true; + /* + * We might have scheduled out for long time so make + * sure to check if doorbells are cleared by this time + * or not. + */ + do_last_check = true; + } + spin_lock_irqsave(hba->host->host_lock, flags); + } while (tm_doorbell || tr_pending); + + if (timeout) { + dev_err(hba->dev, + "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n", + __func__, tm_doorbell, tr_pending); + ret = -EBUSY; + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); + return ret; +} + +/** + * ufshcd_scale_gear - scale up/down UFS gear + * @hba: per adapter instance + * @scale_up: True for scaling up gear and false for scaling down + * + * Returns 0 for success, + * Returns -EBUSY if scaling can't happen at this time + * Returns non-zero for any other errors + */ +static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + struct ufs_pa_layer_attr new_pwr_info; + + if (scale_up) { + memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, + sizeof(struct ufs_pa_layer_attr)); + } else { + memcpy(&new_pwr_info, &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + + if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || + hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { + /* save the current power mode */ + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + + /* scale down gear */ + new_pwr_info.gear_tx = hba->clk_scaling.min_gear; + new_pwr_info.gear_rx = hba->clk_scaling.min_gear; + } + } + + /* check if the power mode needs to be changed or not? 
+	 */
+	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
+	if (ret)
+		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+			__func__, ret,
+			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+			new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+	return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
+	int ret = 0;
+	/*
+	 * make sure that there are no outstanding requests when
+	 * clock scaling is in progress
+	 */
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->clk_scaling_lock);
+
+	if (!hba->clk_scaling.is_allowed ||
+	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+		ret = -EBUSY;
+		up_write(&hba->clk_scaling_lock);
+		ufshcd_scsi_unblock_requests(hba);
+		goto out;
+	}
+
+	/* let's not get into low power until clock scaling is completed */
+	ufshcd_hold(hba, false);
+
+out:
+	return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+{
+	if (writelock)
+		up_write(&hba->clk_scaling_lock);
+	else
+		up_read(&hba->clk_scaling_lock);
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	bool is_writelock = true;
+
+	ret = ufshcd_clock_scaling_prepare(hba);
+	if (ret)
+		return ret;
+
+	/* scale down the gear before scaling down clocks */
+	if (!scale_up) {
+		ret = ufshcd_scale_gear(hba, false);
+		if (ret)
+			goto out_unprepare;
+	}
+
+	ret = ufshcd_scale_clks(hba, scale_up);
+	if (ret) {
+		if (!scale_up)
+			ufshcd_scale_gear(hba, true);
+		goto out_unprepare;
+	}
+
+	/* scale up the gear after scaling up clocks */
+	if (scale_up) {
+		ret = ufshcd_scale_gear(hba, true);
+		if (ret) {
+			ufshcd_scale_clks(hba, false);
+			goto out_unprepare;
+		}
+	}
+
+	/* Enable Write Booster if we have scaled up else disable it */
+	downgrade_write(&hba->clk_scaling_lock);
+	is_writelock = false;
+	ufshcd_wb_toggle(hba, scale_up);
+
+out_unprepare:
+	ufshcd_clock_scaling_unprepare(hba, is_writelock);
+	return ret;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.suspend_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = true;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	__ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.resume_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (!hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = false;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	devfreq_resume_device(hba->devfreq);
+}
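+
+/*
+ * Editor's sketch, not part of the original source: the devfreq framework
+ * drives scaling through the ->target() callback below. Under the simple
+ * ondemand governor the flow is roughly
+ *
+ *	governor polls ->get_dev_status()  ->  busy_time vs. total_time
+ *	governor picks a frequency         ->  ufshcd_devfreq_target(dev, &freq)
+ *	ufshcd_devfreq_target()            ->  ufshcd_devfreq_scale(hba, scale_up)
+ *
+ * Since only the min and max clock frequencies are registered as OPPs in
+ * ufshcd_devfreq_init(), the rounded *freq effectively reduces to a
+ * boolean scale-up/scale-down decision.
+ */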
+
+static int ufshcd_devfreq_target(struct device *dev,
+				unsigned long *freq, u32 flags)
+{
+	int ret = 0;
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	ktime_t start;
+	bool scale_up, sched_clk_scaling_suspend_work = false;
+	struct list_head *clk_list = &hba->clk_list_head;
+	struct ufs_clk_info *clki;
+	unsigned long irq_flags;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return -EINVAL;
+
+	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+	/* Override with the closest supported frequency */
+	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return 0;
+	}
+
+	if (!hba->clk_scaling.active_reqs)
+		sched_clk_scaling_suspend_work = true;
+
+	if (list_empty(clk_list)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		goto out;
+	}
+
+	/* Decide based on the rounded-off frequency and update */
+	scale_up = *freq == clki->max_freq;
+	if (!scale_up)
+		*freq = clki->min_freq;
+	/* Update the frequency */
+	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		ret = 0;
+		goto out; /* no state change required */
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	start = ktime_get();
+	ret = ufshcd_devfreq_scale(hba, scale_up);
+
+	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+		(scale_up ? "up" : "down"),
+		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+	if (sched_clk_scaling_suspend_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.suspend_work);
+
+	return ret;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *stat)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+	unsigned long flags;
+	struct list_head *clk_list = &hba->clk_list_head;
+	struct ufs_clk_info *clki;
+	ktime_t curr_t;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return -EINVAL;
+
+	memset(stat, 0, sizeof(*stat));
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	curr_t = ktime_get();
+	if (!scaling->window_start_t)
+		goto start_window;
+
+	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+	/*
+	 * If the current frequency is 0, the ondemand governor assumes that
+	 * no initial frequency has been set and always requests the maximum
+	 * frequency.
+ */ + stat->current_frequency = clki->curr_freq; + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_us_delta(curr_t, + scaling->busy_start_t); + + stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; +start_window: + scaling->window_start_t = curr_t; + scaling->tot_busy_t = 0; + + if (hba->outstanding_reqs) { + scaling->busy_start_t = curr_t; + scaling->is_busy_started = true; + } else { + scaling->busy_start_t = 0; + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +static int ufshcd_devfreq_init(struct ufs_hba *hba) +{ + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + struct devfreq *devfreq; + int ret; + + /* Skip devfreq if we don't have any clocks in the list */ + if (list_empty(clk_list)) + return 0; + + clki = list_first_entry(clk_list, struct ufs_clk_info, list); + dev_pm_opp_add(hba->dev, clki->min_freq, 0); + dev_pm_opp_add(hba->dev, clki->max_freq, 0); + + ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, + &hba->vps->ondemand_data); + devfreq = devfreq_add_device(hba->dev, + &hba->vps->devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &hba->vps->ondemand_data); + if (IS_ERR(devfreq)) { + ret = PTR_ERR(devfreq); + dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); + + dev_pm_opp_remove(hba->dev, clki->min_freq); + dev_pm_opp_remove(hba->dev, clki->max_freq); + return ret; + } + + hba->devfreq = devfreq; + + return 0; +} + +static void ufshcd_devfreq_remove(struct ufs_hba *hba) +{ + struct list_head *clk_list = &hba->clk_list_head; + struct ufs_clk_info *clki; + + if (!hba->devfreq) + return; + + devfreq_remove_device(hba->devfreq); + hba->devfreq = NULL; + + clki = list_first_entry(clk_list, struct ufs_clk_info, list); + dev_pm_opp_remove(hba->dev, clki->min_freq); + dev_pm_opp_remove(hba->dev, clki->max_freq); +} + +static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + + devfreq_suspend_device(hba->devfreq); + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.window_start_t = 0; + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + bool suspend = false; + + cancel_work_sync(&hba->clk_scaling.suspend_work); + cancel_work_sync(&hba->clk_scaling.resume_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (suspend) + __ufshcd_suspend_clkscaling(hba); +} + +static void ufshcd_resume_clkscaling(struct ufs_hba *hba) +{ + unsigned long flags; + bool resume = false; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_scaling.is_suspended) { + resume = true; + hba->clk_scaling.is_suspended = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (resume) + devfreq_resume_device(hba->devfreq); +} + +static ssize_t ufshcd_clkscale_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); +} + +static ssize_t ufshcd_clkscale_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int err = 0; + + if (kstrtou32(buf, 0, &value)) 
+ return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + err = -EBUSY; + goto out; + } + + value = !!value; + if (value == hba->clk_scaling.is_enabled) + goto out; + + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba, false); + + hba->clk_scaling.is_enabled = value; + + if (value) { + ufshcd_resume_clkscaling(hba); + } else { + ufshcd_suspend_clkscaling(hba); + err = ufshcd_devfreq_scale(hba, true); + if (err) + dev_err(hba->dev, "%s: failed to scale clocks up %d\n", + __func__, err); + } + + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); +out: + up(&hba->host_sem); + return err ? err : count; +} + +static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) +{ + hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; + hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); + hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; + hba->clk_scaling.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); +} + +static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) +{ + if (hba->clk_scaling.enable_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); +} + +static void ufshcd_init_clk_scaling(struct ufs_hba *hba) +{ + char wq_name[sizeof("ufs_clkscaling_00")]; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + if (!hba->clk_scaling.min_gear) + hba->clk_scaling.min_gear = UFS_HS_G1; + + INIT_WORK(&hba->clk_scaling.suspend_work, + ufshcd_clk_scaling_suspend_work); + INIT_WORK(&hba->clk_scaling.resume_work, + ufshcd_clk_scaling_resume_work); + + snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", + hba->host->host_no); + hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + + hba->clk_scaling.is_initialized = true; +} + +static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) +{ + if (!hba->clk_scaling.is_initialized) + return; + + ufshcd_remove_clk_scaling_sysfs(hba); + destroy_workqueue(hba->clk_scaling.workq); + ufshcd_devfreq_remove(hba); + hba->clk_scaling.is_initialized = false; +} + +static void ufshcd_ungate_work(struct work_struct *work) +{ + int ret; + unsigned long flags; + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.ungate_work); + + cancel_delayed_work_sync(&hba->clk_gating.gate_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_ON) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto unblock_reqs; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_hba_vreg_set_hpm(hba); + ufshcd_setup_clocks(hba, true); + + ufshcd_enable_irq(hba); + + /* Exit from hibern8 */ + if (ufshcd_can_hibern8_during_gating(hba)) { + /* Prevent gating in this path */ + hba->clk_gating.is_suspended = true; + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (ret) + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + else + ufshcd_set_link_active(hba); + } + hba->clk_gating.is_suspended = false; + } +unblock_reqs: + ufshcd_scsi_unblock_requests(hba); +} + +/** + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. + * Also, exit from hibern8 mode and set the link as active. + * @hba: per adapter instance + * @async: This indicates whether caller should ungate clocks asynchronously. 
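+ *
+ * (Editor's usage sketch, not in the original comment: callers that must
+ * not sleep pass async=true and treat -EAGAIN as "try again later", as
+ * ufshcd_queuecommand() does further below:
+ *
+ *	if (ufshcd_hold(hba, true))
+ *		return SCSI_MLQUEUE_HOST_BUSY;
+ *
+ * Sleepable contexts call ufshcd_hold(hba, false), which waits for the
+ * ungate work and always returns 0.)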
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	bool flush_result;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkgating_allowed(hba) ||
+	    !hba->clk_gating.is_initialized)
+		goto out;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.active_reqs++;
+
+start:
+	switch (hba->clk_gating.state) {
+	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in ON state, the link could
+		 * still be in hibern8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			if (async) {
+				rc = -EAGAIN;
+				hba->clk_gating.active_reqs--;
+				break;
+			}
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_result = flush_work(&hba->clk_gating.ungate_work);
+			if (hba->clk_gating.is_suspended && !flush_result)
+				goto out;
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
+		break;
+	case REQ_CLKS_OFF:
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			break;
+		}
+		/*
+		 * If we are here, it means gating work is either done or
+		 * currently running. Hence, fall through to cancel gating
+		 * work and to enable clocks.
+		 */
+		fallthrough;
+	case CLKS_OFF:
+		hba->clk_gating.state = REQ_CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+		if (queue_work(hba->clk_gating.clk_gating_workq,
+			       &hba->clk_gating.ungate_work))
+			ufshcd_scsi_block_requests(hba);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+		fallthrough;
+	case REQ_CLKS_ON:
+		if (async) {
+			rc = -EAGAIN;
+			hba->clk_gating.active_reqs--;
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		flush_work(&hba->clk_gating.ungate_work);
+		/* Make sure state is CLKS_ON before returning */
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		goto start;
+	default:
+		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+			__func__, hba->clk_gating.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ufshcd_hold);
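+
+/*
+ * Editor's sketch, not part of the original source: the gating state
+ * machine implemented by ufshcd_hold(), ufshcd_gate_work() and
+ * ufshcd_ungate_work():
+ *
+ *	CLKS_ON      --(last release + delay_ms timer)--> REQ_CLKS_OFF
+ *	REQ_CLKS_OFF --(gate_work turns clocks off)-----> CLKS_OFF
+ *	REQ_CLKS_OFF --(hold cancels gate_work)---------> CLKS_ON
+ *	CLKS_OFF     --(hold queues ungate_work)--------> REQ_CLKS_ON
+ *	REQ_CLKS_ON  --(ungate_work turns clocks on)----> CLKS_ON
+ */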
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_gating.gate_work.work);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case save time by
+	 * skipping the gating work and exit after changing the clock
+	 * state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
+		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+		goto rel_lock;
+	}
+
+	if (hba->clk_gating.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->outstanding_reqs || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* put the link into hibern8 mode before turning off clocks */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret) {
+			hba->clk_gating.state = CLKS_ON;
+			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+				__func__, ret);
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			goto out;
+		}
+		ufshcd_set_link_hibern8(hba);
+	}
+
+	ufshcd_disable_irq(hba);
+
+	ufshcd_setup_clocks(hba, false);
+
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case keep the state
+	 * as REQ_CLKS_ON, which would anyway imply that clocks are off
+	 * and a request to turn them on is pending. This way, we keep
+	 * the state machine intact and ultimately prevent the cancel
+	 * work from running multiple times when new requests arrive
+	 * before the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == REQ_CLKS_OFF) {
+		hba->clk_gating.state = CLKS_OFF;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+	}
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.active_reqs--;
+
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
+	    hba->active_uic_cmd || hba->uic_async_done ||
+	    hba->clk_gating.state == CLKS_OFF)
+		return;
+
+	hba->clk_gating.state = REQ_CLKS_OFF;
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	queue_delayed_work(hba->clk_gating.clk_gating_workq,
+			   &hba->clk_gating.gate_work,
+			   msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_release);
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+
ufshcd_clkgate_delay_set(dev, value); + return count; +} + +static ssize_t ufshcd_clkgate_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); +} + +static ssize_t ufshcd_clkgate_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags; + u32 value; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + value = !!value; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (value == hba->clk_gating.is_enabled) + goto out; + + if (value) + __ufshcd_release(hba); + else + hba->clk_gating.active_reqs++; + + hba->clk_gating.is_enabled = value; +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) +{ + hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; + hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; + sysfs_attr_init(&hba->clk_gating.delay_attr.attr); + hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; + hba->clk_gating.delay_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); + + hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; + hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; + sysfs_attr_init(&hba->clk_gating.enable_attr.attr); + hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; + hba->clk_gating.enable_attr.attr.mode = 0644; + if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); +} + +static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) +{ + if (hba->clk_gating.delay_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_gating.delay_attr); + if (hba->clk_gating.enable_attr.attr.name) + device_remove_file(hba->dev, &hba->clk_gating.enable_attr); +} + +static void ufshcd_init_clk_gating(struct ufs_hba *hba) +{ + char wq_name[sizeof("ufs_clk_gating_00")]; + + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.state = CLKS_ON; + + hba->clk_gating.delay_ms = 150; + INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); + INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + + snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", + hba->host->host_no); + hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, + WQ_MEM_RECLAIM | WQ_HIGHPRI); + + ufshcd_init_clk_gating_sysfs(hba); + + hba->clk_gating.is_enabled = true; + hba->clk_gating.is_initialized = true; +} + +static void ufshcd_exit_clk_gating(struct ufs_hba *hba) +{ + if (!hba->clk_gating.is_initialized) + return; + + ufshcd_remove_clk_gating_sysfs(hba); + + /* Ungate the clock if necessary. 
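+ *
+ * (Editor's note, not in the original comment: taking a hold first forces
+ * an ungate and pins active_reqs, so once is_initialized is cleared the
+ * final release below cannot re-arm the gate work.)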
*/ + ufshcd_hold(hba, false); + hba->clk_gating.is_initialized = false; + ufshcd_release(hba); + + destroy_workqueue(hba->clk_gating.clk_gating_workq); +} + +/* Must be called with host lock acquired */ +static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) +{ + bool queue_resume_work = false; + ktime_t curr_t = ktime_get(); + unsigned long flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->clk_scaling.active_reqs++) + queue_resume_work = true; + + if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return; + } + + if (queue_resume_work) + queue_work(hba->clk_scaling.workq, + &hba->clk_scaling.resume_work); + + if (!hba->clk_scaling.window_start_t) { + hba->clk_scaling.window_start_t = curr_t; + hba->clk_scaling.tot_busy_t = 0; + hba->clk_scaling.is_busy_started = false; + } + + if (!hba->clk_scaling.is_busy_started) { + hba->clk_scaling.busy_start_t = curr_t; + hba->clk_scaling.is_busy_started = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) +{ + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.active_reqs--; + if (!hba->outstanding_reqs && scaling->is_busy_started) { + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + scaling->busy_start_t = 0; + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static inline int ufshcd_monitor_opcode2dir(u8 opcode) +{ + if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16) + return READ; + else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16) + return WRITE; + else + return -EINVAL; +} + +static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct ufs_hba_monitor *m = &hba->monitor; + + return (m->enabled && lrbp && lrbp->cmd && + (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && + ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); +} + +static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) + hba->monitor.busy_start_ts[dir] = ktime_get(); + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { + struct request *req = scsi_cmd_to_rq(lrbp->cmd); + struct ufs_hba_monitor *m = &hba->monitor; + ktime_t now, inc, lat; + + now = lrbp->compl_time_stamp; + inc = ktime_sub(now, m->busy_start_ts[dir]); + m->total_busy[dir] = ktime_add(m->total_busy[dir], inc); + m->nr_sec_rw[dir] += blk_rq_sectors(req); + + /* Update latencies */ + m->nr_req[dir]++; + lat = ktime_sub(now, lrbp->issue_time_stamp); + m->lat_sum[dir] += lat; + if (m->lat_max[dir] < lat || !m->lat_max[dir]) + m->lat_max[dir] = lat; + if (m->lat_min[dir] > lat || !m->lat_min[dir]) + m->lat_min[dir] = lat; + + m->nr_queued[dir]--; + /* Push forward the 
busy start of monitor */ + m->busy_start_ts[dir] = now; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +/** + * ufshcd_send_command - Send SCSI or device management commands + * @hba: per adapter instance + * @task_tag: Task tag of the command + */ +static inline +void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + unsigned long flags; + + lrbp->issue_time_stamp = ktime_get(); + lrbp->compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); + ufshcd_clk_scaling_start_busy(hba); + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) + ufshcd_start_monitor(hba, lrbp); + + spin_lock_irqsave(&hba->outstanding_lock, flags); + if (hba->vops && hba->vops->setup_xfer_req) + hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); + __set_bit(task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); +} + +/** + * ufshcd_copy_sense_data - Copy sense data in case of check condition + * @lrbp: pointer to local reference block + */ +static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) +{ + u8 *const sense_buffer = lrbp->cmd->sense_buffer; + int len; + + if (sense_buffer && + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { + int len_to_copy; + + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); + len_to_copy = min_t(int, UFS_SENSE_SIZE, len); + + memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, + len_to_copy); + } +} + +/** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static +int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + + /* Get the descriptor */ + if (hba->dev_cmd.query.descriptor && + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 resp_len; + u16 buf_len; + + /* data segment length */ + resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + buf_len = be16_to_cpu( + hba->dev_cmd.query.request.upiu_req.length); + if (likely(buf_len >= resp_len)) { + memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); + } else { + dev_warn(hba->dev, + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, buf_len); + return -EINVAL; + } + } + + return 0; +} + +/** + * ufshcd_hba_capabilities - Read controller capabilities + * @hba: per adapter instance + * + * Return: 0 on success, negative on error. 
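+ *
+ * (Editor's worked example, assuming the usual slot masks of 0x1f and
+ * 0x70000 to match the shifts above: capabilities = 0x0707001f decodes to
+ * nutrs = 0x1f + 1 = 32 transfer request slots and nutmrs = 7 + 1 = 8 task
+ * management slots, leaving slot 31 as the reserved device-management slot.)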
+ */ +static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) +{ + int err; + + hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); + + /* nutrs and nutmrs are 0 based values */ + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; + hba->nutmrs = + ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; + hba->reserved_slot = hba->nutrs - 1; + + /* Read crypto capabilities */ + err = ufshcd_hba_init_crypto_capabilities(hba); + if (err) + dev_err(hba->dev, "crypto setup failed\n"); + + return err; +} + +/** + * ufshcd_ready_for_uic_cmd - Check if controller is ready + * to accept UIC commands + * @hba: per adapter instance + * Return true on success, else false + */ +static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; +} + +/** + * ufshcd_get_upmcrs - Get the power mode change request status + * @hba: Pointer to adapter instance + * + * This function gets the UPMCRS field of HCS register + * Returns value of UPMCRS field + */ +static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; +} + +/** + * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer + * @hba: per adapter instance + * @uic_cmd: UIC command + */ +static inline void +ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + lockdep_assert_held(&hba->uic_cmd_mutex); + + WARN_ON(hba->active_uic_cmd); + + hba->active_uic_cmd = uic_cmd; + + /* Write Args */ + ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + + ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); + + /* Write UIC Cmd */ + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); +} + +/** + * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Returns 0 only if success. + */ +static int +ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + lockdep_assert_held(&hba->uic_cmd_mutex); + + if (wait_for_completion_timeout(&uic_cmd->done, + msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; + } else { + ret = -ETIMEDOUT; + dev_err(hba->dev, + "uic cmd 0x%x with arg3 0x%x completion timeout\n", + uic_cmd->command, uic_cmd->argument3); + + if (!uic_cmd->cmd_active) { + dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", + __func__); + ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; + } + } + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->active_uic_cmd = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +/** + * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * @completion: initialize the completion only if this is set to true + * + * Returns 0 only if success. 
+ */ +static int +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, + bool completion) +{ + lockdep_assert_held(&hba->uic_cmd_mutex); + lockdep_assert_held(hba->host->host_lock); + + if (!ufshcd_ready_for_uic_cmd(hba)) { + dev_err(hba->dev, + "Controller not ready to accept UIC commands\n"); + return -EIO; + } + + if (completion) + init_completion(&uic_cmd->done); + + uic_cmd->cmd_active = 1; + ufshcd_dispatch_uic_cmd(hba, uic_cmd); + + return 0; +} + +/** + * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Returns 0 only if success. + */ +int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) + return 0; + + ufshcd_hold(hba, false); + mutex_lock(&hba->uic_cmd_mutex); + ufshcd_add_delay_before_dme_cmd(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!ret) + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + + mutex_unlock(&hba->uic_cmd_mutex); + + ufshcd_release(hba); + return ret; +} + +/** + * ufshcd_map_sg - Map scatter-gather list to prdt + * @hba: per adapter instance + * @lrbp: pointer to local reference block + * + * Returns 0 in case of success, non-zero value in case of failure + */ +static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshcd_sg_entry *prd_table; + struct scatterlist *sg; + struct scsi_cmnd *cmd; + int sg_segments; + int i; + + cmd = lrbp->cmd; + sg_segments = scsi_dma_map(cmd); + if (sg_segments < 0) + return sg_segments; + + if (sg_segments) { + + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((sg_segments * + sizeof(struct ufshcd_sg_entry))); + else + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16(sg_segments); + + prd_table = lrbp->ucd_prdt_ptr; + + scsi_for_each_sg(cmd, sg, sg_segments, i) { + const unsigned int len = sg_dma_len(sg); + + /* + * From the UFSHCI spec: "Data Byte Count (DBC): A '0' + * based value that indicates the length, in bytes, of + * the data block. A maximum of length of 256KB may + * exist for any entry. Bits 1:0 of this field shall be + * 11b to indicate Dword granularity. A value of '3' + * indicates 4 bytes, '7' indicates 8 bytes, etc." 
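+ *
+ * (Editor's worked example, not part of the spec quote: a 4 KiB segment is
+ * programmed below as size = 0x1000 - 1 = 0xfff, whose bits 1:0 are 11b as
+ * required.)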
+			 */
+			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+			prd_table[i].size = cpu_to_le32(len - 1);
+			prd_table[i].addr = cpu_to_le64(sg->dma_address);
+			prd_table[i].reserved = 0;
+		}
+	} else {
+		lrbp->utr_descriptor_ptr->prd_table_length = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+	if (hba->ufs_version == ufshci_version(1, 0)) {
+		u32 rw;
+		rw = set & INTERRUPT_MASK_RW_VER_10;
+		set = rw | ((set ^ intrs) & intrs);
+	} else {
+		set |= intrs;
+	}
+
+	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+	if (hba->ufs_version == ufshci_version(1, 0)) {
+		u32 rw;
+		rw = (set & INTERRUPT_MASK_RW_VER_10) &
+			~(intrs & INTERRUPT_MASK_RW_VER_10);
+		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
+
+	} else {
+		set &= ~intrs;
+	}
+
+	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fill the request's header
+ * descriptor according to the request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: the request's data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+			u8 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+	u32 data_direction;
+	u32 dword_0;
+	u32 dword_1 = 0;
+	u32 dword_3 = 0;
+
+	if (cmd_dir == DMA_FROM_DEVICE) {
+		data_direction = UTP_DEVICE_TO_HOST;
+		*upiu_flags = UPIU_CMD_FLAGS_READ;
+	} else if (cmd_dir == DMA_TO_DEVICE) {
+		data_direction = UTP_HOST_TO_DEVICE;
+		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
+	} else {
+		data_direction = UTP_NO_DATA_TRANSFER;
+		*upiu_flags = UPIU_CMD_FLAGS_NONE;
+	}
+
+	dword_0 = data_direction | (lrbp->command_type
+				<< UPIU_COMMAND_TYPE_OFFSET);
+	if (lrbp->intr_cmd)
+		dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+	/* Prepare crypto related dwords */
+	ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
+
+	/* Transfer request descriptor header fields */
+	req_desc->header.dword_0 = cpu_to_le32(dword_0);
+	req_desc->header.dword_1 = cpu_to_le32(dword_1);
+	/*
+	 * Assign an invalid value to the command status. The controller
+	 * updates OCS with the actual command status on command completion.
+	 */
+	req_desc->header.dword_2 =
+		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+	req_desc->header.dword_3 = cpu_to_le32(dword_3);
+
+	req_desc->prd_table_length = 0;
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
+ * for SCSI commands
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
+{
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+	unsigned short cdb_len;
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+				UPIU_TRANSACTION_COMMAND, upiu_flags,
+				lrbp->lun, lrbp->task_tag);
+	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+	/* Total EHS length and Data segment length will be zero */
+	ucd_req_ptr->header.dword_2 = 0;
+
+	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
+
+	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
+	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
+	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+				struct ufshcd_lrb *lrbp, u8 upiu_flags)
+{
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+	struct ufs_query *query = &hba->dev_cmd.query;
+	u16 len = be16_to_cpu(query->request.upiu_req.length);
+
+	/* Query request header */
+	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+			lrbp->lun, lrbp->task_tag);
+	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+			0, query->request.query_func, 0, 0);
+
+	/* The data segment length is only needed for WRITE_DESC */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		ucd_req_ptr->header.dword_2 =
+			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+	else
+		ucd_req_ptr->header.dword_2 = 0;
+
+	/* Copy the Query Request buffer as is */
+	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+			QUERY_OSF_SIZE);
+
+	/* Copy the Descriptor */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		memcpy(ucd_req_ptr + 1, query->descriptor, len);
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+	/* command descriptor fields */
+	ucd_req_ptr->header.dword_0 =
+		UPIU_HEADER_DWORD(
+			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+	/* clear rest of the fields of basic header */
+	ucd_req_ptr->header.dword_1 = 0;
+	ucd_req_ptr->header.dword_2 = 0;
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+}
+
+/**
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
+ *			       for Device Management Purposes
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
+				      struct ufshcd_lrb *lrbp)
+{
+	u8 upiu_flags;
+	int ret = 0;
+
+	if (hba->ufs_version <= ufshci_version(1, 1))
+		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+	else
+		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
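+	/*
+	 * Editor's note, an assumption from the UFSHCI spec rather than
+	 * original commentary: 1.x controllers used a dedicated "device
+	 * management" command type in the request descriptor, while 2.0+
+	 * controllers use the single UFS storage type and rely on the UPIU
+	 * transaction code to identify the request.
+	 */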
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) + ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); + else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) + ufshcd_prepare_utp_nop_upiu(lrbp); + else + ret = -EINVAL; + + return ret; +} + +/** + * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) + * for SCSI Purposes + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + u8 upiu_flags; + int ret = 0; + + if (hba->ufs_version <= ufshci_version(1, 1)) + lrbp->command_type = UTP_CMD_TYPE_SCSI; + else + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + + if (likely(lrbp->cmd)) { + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, + lrbp->cmd->sc_data_direction); + ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); + } else { + ret = -EINVAL; + } + + return ret; +} + +/** + * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID + * @upiu_wlun_id: UPIU W-LUN id + * + * Returns SCSI W-LUN id + */ +static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) +{ + return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; +} + +static inline bool is_device_wlun(struct scsi_device *sdev) +{ + return sdev->lun == + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); +} + +/* + * Associate the UFS controller queue with the default and poll HCTX types. + * Initialize the mq_map[] arrays. + */ +static int ufshcd_map_queues(struct Scsi_Host *shost) +{ + int i, ret; + + for (i = 0; i < shost->nr_maps; i++) { + struct blk_mq_queue_map *map = &shost->tag_set.map[i]; + + switch (i) { + case HCTX_TYPE_DEFAULT: + case HCTX_TYPE_POLL: + map->nr_queues = 1; + break; + case HCTX_TYPE_READ: + map->nr_queues = 0; + continue; + default: + WARN_ON_ONCE(true); + } + map->queue_offset = 0; + ret = blk_mq_map_queues(map); + WARN_ON_ONCE(ret); + } + + return 0; +} + +static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) +{ + struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; + struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; + dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + + i * sizeof(struct utp_transfer_cmd_desc); + u16 response_offset = offsetof(struct utp_transfer_cmd_desc, + response_upiu); + u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); + + lrb->utr_descriptor_ptr = utrdlp + i; + lrb->utrd_dma_addr = hba->utrdl_dma_addr + + i * sizeof(struct utp_transfer_req_desc); + lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i); + lrb->ucd_req_dma_addr = cmd_desc_element_addr; + lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; + lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; + lrb->ucd_prdt_ptr = cmd_descp[i].prd_table; + lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; +} + +/** + * ufshcd_queuecommand - main entry point for SCSI requests + * @host: SCSI host pointer + * @cmd: command from SCSI Midlayer + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp; + int err = 0; + + WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); + + /* + * Allows the UFS error handler to wait for prior ufshcd_queuecommand() + * calls. 
+ */ + rcu_read_lock(); + + switch (hba->ufshcd_state) { + case UFSHCD_STATE_OPERATIONAL: + break; + case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: + /* + * SCSI error handler can call ->queuecommand() while UFS error + * handler is in progress. Error interrupts could change the + * state from UFSHCD_STATE_RESET to + * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests + * being issued in that case. + */ + if (ufshcd_eh_in_progress(hba)) { + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + break; + case UFSHCD_STATE_EH_SCHEDULED_FATAL: + /* + * pm_runtime_get_sync() is used at error handling preparation + * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's + * PM ops, it can never be finished if we let SCSI layer keep + * retrying it, which gets err handler stuck forever. Neither + * can we let the scsi cmd pass through, because UFS is in bad + * state, the scsi cmd may eventually time out, which will get + * err handler blocked for too long. So, just fail the scsi cmd + * sent from PM ops, err handler can recover PM error anyways. + */ + if (hba->pm_op_in_progress) { + hba->force_reset = true; + set_host_byte(cmd, DID_BAD_TARGET); + scsi_done(cmd); + goto out; + } + fallthrough; + case UFSHCD_STATE_RESET: + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + case UFSHCD_STATE_ERROR: + set_host_byte(cmd, DID_ERROR); + scsi_done(cmd); + goto out; + } + + hba->req_abort_count = 0; + + err = ufshcd_hold(hba, true); + if (err) { + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + WARN_ON(ufshcd_is_clkgating_allowed(hba) && + (hba->clk_gating.state != CLKS_ON)); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + + ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); + + lrbp->req_abort_skip = false; + + ufshpb_prep(hba, lrbp); + + ufshcd_comp_scsi_upiu(hba, lrbp); + + err = ufshcd_map_sg(hba, lrbp); + if (err) { + lrbp->cmd = NULL; + ufshcd_release(hba); + goto out; + } + + ufshcd_send_command(hba, tag); + +out: + rcu_read_unlock(); + + if (ufs_trigger_eh()) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_schedule_eh_work(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + return err; +} + +static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) +{ + lrbp->cmd = NULL; + lrbp->task_tag = tag; + lrbp->lun = 0; /* device management cmd is not specific to any LUN */ + lrbp->intr_cmd = true; /* No interrupt aggregation */ + ufshcd_prepare_lrbp_crypto(NULL, lrbp); + hba->dev_cmd.type = cmd_type; + + return ufshcd_compose_devman_upiu(hba, lrbp); +} + +static int +ufshcd_clear_cmd(struct ufs_hba *hba, int tag) +{ + int err = 0; + unsigned long flags; + u32 mask = 1 << tag; + + /* clear outstanding transaction before retry */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_utrl_clear(hba, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* + * wait for h/w to clear corresponding bit in door-bell. + * max. wait is 1 sec. 
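+ * (Editor's reading, an assumption about ufshcd_wait_for_register()'s
+ * (mask, value, interval_us, timeout_ms) parameters: the call below polls
+ * the transfer request doorbell every 1000 us for up to 1000 ms until the
+ * tag's bit reads back as zero.)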
+ */ + err = ufshcd_wait_for_register(hba, + REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); + + return err; +} + +static int +ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + return query_res->response; +} + +/** + * ufshcd_dev_cmd_completion() - handles device management command responses + * @hba: per adapter instance + * @lrbp: pointer to local reference block + */ +static int +ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int resp; + int err = 0; + + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + + switch (resp) { + case UPIU_TRANSACTION_NOP_IN: + if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response %x\n", + __func__, resp); + } + break; + case UPIU_TRANSACTION_QUERY_RSP: + err = ufshcd_check_query_response(hba, lrbp); + if (!err) + err = ufshcd_copy_query_response(hba, lrbp); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + err = -EPERM; + dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", + __func__); + break; + default: + err = -EINVAL; + dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", + __func__, resp); + break; + } + + return err; +} + +static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, int max_timeout) +{ + int err = 0; + unsigned long time_left; + unsigned long flags; + + time_left = wait_for_completion_timeout(hba->dev_cmd.complete, + msecs_to_jiffies(max_timeout)); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->dev_cmd.complete = NULL; + if (likely(time_left)) { + err = ufshcd_get_tr_ocs(lrbp); + if (!err) + err = ufshcd_dev_cmd_completion(hba, lrbp); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (!time_left) { + err = -ETIMEDOUT; + dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", + __func__, lrbp->task_tag); + if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) + /* successfully cleared the command, retry if needed */ + err = -EAGAIN; + /* + * in case of an error, after clearing the doorbell, + * we also need to clear the outstanding_request + * field in hba + */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + __clear_bit(lrbp->task_tag, &hba->outstanding_reqs); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + } + + return err; +} + +/** + * ufshcd_exec_dev_cmd - API for sending device management requests + * @hba: UFS hba + * @cmd_type: specifies the type (NOP, Query...) + * @timeout: timeout in milliseconds + * + * NOTE: Since there is only one available tag for device management commands, + * it is expected you hold the hba->dev_cmd.lock mutex. + */ +static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, + enum dev_cmd_type cmd_type, int timeout) +{ + DECLARE_COMPLETION_ONSTACK(wait); + const u32 tag = hba->reserved_slot; + struct ufshcd_lrb *lrbp; + int err; + + /* Protects use of hba->reserved_slot. 
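+ * (Editor's note, not in the original comment: reserved_slot is nutrs - 1,
+ * assigned in ufshcd_hba_capabilities() above, so a device management
+ * command cannot collide with a tag handed out by the block layer.)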
+ */
+	lockdep_assert_held(&hba->dev_cmd.lock);
+
+	down_read(&hba->clk_scaling_lock);
+
+	lrbp = &hba->lrb[tag];
+	WARN_ON(lrbp->cmd);
+	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+	if (unlikely(err))
+		goto out;
+
+	hba->dev_cmd.complete = &wait;
+
+	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+	ufshcd_send_command(hba, tag);
+	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+out:
+	up_read(&hba->clk_scaling_lock);
+	return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+		struct ufs_query_req **request, struct ufs_query_res **response,
+		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+	*request = &hba->dev_cmd.query.request;
+	*response = &hba->dev_cmd.query.response;
+	memset(*request, 0, sizeof(struct ufs_query_req));
+	memset(*response, 0, sizeof(struct ufs_query_res));
+	(*request)->upiu_req.opcode = opcode;
+	(*request)->upiu_req.idn = idn;
+	(*request)->upiu_req.index = index;
+	(*request)->upiu_req.selector = selector;
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
+{
+	int ret;
+	int retries;
+
+	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
+		if (ret)
+			dev_dbg(hba->dev,
+				"%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
+			__func__, opcode, idn, ret, retries);
+	return ret;
+}
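+
+/*
+ * Editor's example, not part of the original source: a typical flag query,
+ * such as setting fDeviceInit during device initialization, might look like
+ *
+ *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
+ *
+ * while a read back of the same flag would pass a bool *flag_res instead
+ * of NULL.
+ */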
timeout);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ return -EINVAL;
+ }
+
+ ufshcd_hold(hba, false);
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
+ goto out_unlock;
+ }
+
+ *attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retries\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ return -EINVAL;
+ }
+
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) 
is out of range\n", + __func__, *buf_len); + return -EINVAL; + } + + ufshcd_hold(hba, false); + + mutex_lock(&hba->dev_cmd.lock); + ufshcd_init_query(hba, &request, &response, opcode, idn, index, + selector); + hba->dev_cmd.query.descriptor = desc_buf; + request->upiu_req.length = cpu_to_be16(*buf_len); + + switch (opcode) { + case UPIU_QUERY_OPCODE_WRITE_DESC: + request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; + break; + case UPIU_QUERY_OPCODE_READ_DESC: + request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; + break; + default: + dev_err(hba->dev, + "%s: Expected query descriptor opcode but got = 0x%.2x\n", + __func__, opcode); + err = -EINVAL; + goto out_unlock; + } + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + + if (err) { + dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", + __func__, opcode, idn, index, err); + goto out_unlock; + } + + *buf_len = be16_to_cpu(response->upiu_res.length); + +out_unlock: + hba->dev_cmd.query.descriptor = NULL; + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_query_descriptor_retry - API function for sending descriptor requests + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @desc_buf: the buffer that contains the descriptor + * @buf_len: length parameter passed to the device + * + * Returns 0 for success, non-zero in case of failure. + * The buf_len parameter will contain, on return, the length parameter + * received on the response. + */ +int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, + enum desc_idn idn, u8 index, + u8 selector, + u8 *desc_buf, int *buf_len) +{ + int err; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = __ufshcd_query_descriptor(hba, opcode, idn, index, + selector, desc_buf, buf_len); + if (!err || err == -EINVAL) + break; + } + + return err; +} + +/** + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_len: mapped desc length (out) + */ +void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_len) +{ + if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 || + desc_id == QUERY_DESC_IDN_RFU_1) + *desc_len = 0; + else + *desc_len = hba->desc_size[desc_id]; +} +EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); + +static void ufshcd_update_desc_length(struct ufs_hba *hba, + enum desc_idn desc_id, int desc_index, + unsigned char desc_len) +{ + if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && + desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT) + /* For UFS 3.1, the normal unit descriptor is 10 bytes larger + * than the RPMB unit, however, both descriptors share the same + * desc_idn, to cover both unit descriptors with one length, we + * choose the normal unit descriptor length by desc_index. 
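+ *
+ * Illustrative numbers only: if a regular LU reports bLength 0x2D while
+ * the RPMB unit reports a smaller value, the cached
+ * hba->desc_size[QUERY_DESC_IDN_UNIT] stays at 0x2D, because the
+ * desc_index != UFS_RPMB_UNIT test above skips the RPMB descriptor.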
+ */ + hba->desc_size[desc_id] = desc_len; +} + +/** + * ufshcd_read_desc_param - read the specified descriptor parameter + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 param_offset, + u8 *param_read_buf, + u8 param_size) +{ + int ret; + u8 *desc_buf; + int buff_len; + bool is_kmalloc = true; + + /* Safety check */ + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) + return -EINVAL; + + /* Get the length of descriptor */ + ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); + if (!buff_len) { + dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); + return -EINVAL; + } + + if (param_offset >= buff_len) { + dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", + __func__, param_offset, desc_id, buff_len); + return -EINVAL; + } + + /* Check whether we need temp memory */ + if (param_offset != 0 || param_size < buff_len) { + desc_buf = kzalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + } else { + desc_buf = param_read_buf; + is_kmalloc = false; + } + + /* Request for full descriptor */ + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, + desc_buf, &buff_len); + + if (ret) { + dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", + __func__, desc_id, desc_index, param_offset, ret); + goto out; + } + + /* Sanity check */ + if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { + dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", + __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); + ret = -EINVAL; + goto out; + } + + /* Update descriptor length */ + buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; + ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); + + if (is_kmalloc) { + /* Make sure we don't copy more data than available */ + if (param_offset >= buff_len) + ret = -EINVAL; + else + memcpy(param_read_buf, &desc_buf[param_offset], + min_t(u32, param_size, buff_len - param_offset)); + } +out: + if (is_kmalloc) + kfree(desc_buf); + return ret; +} + +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char ufshcd_remove_non_printable(u8 ch) +{ + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; +} + +/** + * ufshcd_read_string_desc - read string descriptor + * @hba: pointer to adapter instance + * @desc_index: descriptor index + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. + * @ascii: if true convert from unicode to ascii characters + * null terminated string. + * + * Return: + * * string size on success. 
+ * * -ENOMEM: on allocation failure + * * -EINVAL: on a wrong parameter + */ +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii) +{ + struct uc_string_id *uc_str; + u8 *str; + int ret; + + if (!buf) + return -EINVAL; + + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, + (u8 *)uc_str, QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; + goto out; + } + + if (ascii) { + ssize_t ascii_len; + int i; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ret; i++) + str[i] = ufshcd_remove_non_printable(str[i]); + + str[ret++] = '\0'; + + } else { + str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + ret = uc_str->len; + } +out: + *buf = str; + kfree(uc_str); + return ret; +} + +/** + * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter + * @hba: Pointer to adapter instance + * @lun: lun id + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, + int lun, + enum unit_desc_param param_offset, + u8 *param_read_buf, + u32 param_size) +{ + /* + * Unit descriptors are only available for general purpose LUs (LUN id + * from 0 to 7) and RPMB Well known LU. + */ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) + return -EOPNOTSUPP; + + return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, + param_offset, param_read_buf, param_size); +} + +static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) +{ + int err = 0; + u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; + + if (hba->dev_info.wspecversion >= 0x300) { + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0, + &gating_wait); + if (err) + dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", + err, gating_wait); + + if (gating_wait == 0) { + gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US; + dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", + gating_wait); + } + + hba->dev_info.clk_gating_wait_us = gating_wait; + } + + return err; +} + +/** + * ufshcd_memory_alloc - allocate memory for host memory space data structures + * @hba: per adapter instance + * + * 1. Allocate DMA memory for Command Descriptor array + * Each command descriptor consist of Command UPIU, Response UPIU and PRDT + * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). + * 3. Allocate DMA memory for UTP Task Management Request Descriptor List + * (UTMRDL) + * 4. 
Allocate memory for local reference block(lrb). + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_memory_alloc(struct ufs_hba *hba) +{ + size_t utmrdl_size, utrdl_size, ucdl_size; + + /* Allocate memory for UTP command descriptors */ + ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); + hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, + ucdl_size, + &hba->ucdl_dma_addr, + GFP_KERNEL); + + /* + * UFSHCI requires UTP command descriptor to be 128 byte aligned. + * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE + * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will + * be aligned to 128 bytes as well + */ + if (!hba->ucdl_base_addr || + WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Command Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Transfer descriptors + * UFSHCI requires 1024 byte alignment of UTRD + */ + utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); + hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, + utrdl_size, + &hba->utrdl_dma_addr, + GFP_KERNEL); + if (!hba->utrdl_base_addr || + WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Transfer Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Task Management descriptors + * UFSHCI requires 1024 byte alignment of UTMRD + */ + utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; + hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, + utmrdl_size, + &hba->utmrdl_dma_addr, + GFP_KERNEL); + if (!hba->utmrdl_base_addr || + WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(hba->dev, + "Task Management Descriptor Memory allocation failed\n"); + goto out; + } + + /* Allocate memory for local reference block */ + hba->lrb = devm_kcalloc(hba->dev, + hba->nutrs, sizeof(struct ufshcd_lrb), + GFP_KERNEL); + if (!hba->lrb) { + dev_err(hba->dev, "LRB Memory allocation failed\n"); + goto out; + } + return 0; +out: + return -ENOMEM; +} + +/** + * ufshcd_host_memory_configure - configure local reference block with + * memory offsets + * @hba: per adapter instance + * + * Configure Host memory space + * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA + * address. + * 2. Update each UTRD with Response UPIU offset, Response UPIU length + * and PRDT offset. + * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT + * into local reference block. 
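+ *
+ * As a sketch, using the names from the body below, for each tag i:
+ *
+ *	UCD(i) = ucdl_dma_addr + i * sizeof(struct utp_transfer_cmd_desc)
+ *	utrdlp[i].response_upiu_offset = offsetof(..., response_upiu)
+ *	utrdlp[i].prd_table_offset = offsetof(..., prd_table)
+ *
+ * with the offsets stored in dwords (>> 2) unless the controller has the
+ * UFSHCD_QUIRK_PRDT_BYTE_GRAN quirk.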
+ */ +static void ufshcd_host_memory_configure(struct ufs_hba *hba) +{ + struct utp_transfer_req_desc *utrdlp; + dma_addr_t cmd_desc_dma_addr; + dma_addr_t cmd_desc_element_addr; + u16 response_offset; + u16 prdt_offset; + int cmd_desc_size; + int i; + + utrdlp = hba->utrdl_base_addr; + + response_offset = + offsetof(struct utp_transfer_cmd_desc, response_upiu); + prdt_offset = + offsetof(struct utp_transfer_cmd_desc, prd_table); + + cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); + cmd_desc_dma_addr = hba->ucdl_dma_addr; + + for (i = 0; i < hba->nutrs; i++) { + /* Configure UTRD with command descriptor base address */ + cmd_desc_element_addr = + (cmd_desc_dma_addr + (cmd_desc_size * i)); + utrdlp[i].command_desc_base_addr_lo = + cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); + utrdlp[i].command_desc_base_addr_hi = + cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); + + /* Response upiu and prdt offset should be in double words */ + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE); + } else { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset >> 2); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset >> 2); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + } + + ufshcd_init_lrb(hba, &hba->lrb[i], i); + } +} + +/** + * ufshcd_dme_link_startup - Notify Unipro to perform link startup + * @hba: per adapter instance + * + * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, + * in order to initialize the Unipro link startup procedure. + * Once the Unipro links are up, the device connected to the controller + * is detected. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_link_startup(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, + "dme-link-startup: error code %d\n", ret); + return ret; +} +/** + * ufshcd_dme_reset - UIC command for DME_RESET + * @hba: per adapter instance + * + * DME_RESET command is issued in order to reset UniPro stack. + * This function now deals with cold reset. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_reset(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_RESET; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-reset: error code %d\n", ret); + + return ret; +} + +int ufshcd_dme_configure_adapt(struct ufs_hba *hba, + int agreed_gear, + int adapt_val) +{ + int ret; + + if (agreed_gear != UFS_HS_G4) + adapt_val = PA_NO_ADAPT; + + ret = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_TXHSADAPTTYPE), + adapt_val); + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt); + +/** + * ufshcd_dme_enable - UIC command for DME_ENABLE + * @hba: per adapter instance + * + * DME_ENABLE command is issued in order to enable UniPro stack. 
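+ * On controllers with the UFSHCI_QUIRK_BROKEN_HCE quirk,
+ * ufshcd_hba_enable() further down falls back to DME_RESET followed by
+ * DME_ENABLE instead of toggling the HCE bit.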
+ * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_enable(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_ENABLE; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-enable: error code %d\n", ret); + + return ret; +} + +static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) +{ + #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 + unsigned long min_sleep_time_us; + + if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) + return; + + /* + * last_dme_cmd_tstamp will be 0 only for 1st call to + * this function + */ + if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { + min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; + } else { + unsigned long delta = + (unsigned long) ktime_to_us( + ktime_sub(ktime_get(), + hba->last_dme_cmd_tstamp)); + + if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) + min_sleep_time_us = + MIN_DELAY_BEFORE_DME_CMDS_US - delta; + else + return; /* no more delay required */ + } + + /* allow sleep for extra 50us if needed */ + usleep_range(min_sleep_time_us, min_sleep_time_us + 50); +} + +/** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * @hba: per adapter instance + * @attr_sel: uic command argument1 + * @attr_set: attribute set type as uic command argument2 + * @mib_val: setting value as uic command argument3 + * @peer: indicate whether peer or local + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + "dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + int retries = UFS_UIC_COMMAND_RETRIES; + + uic_cmd.command = peer ? 
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+ struct ufs_pa_layer_attr orig_pwr_info;
+ struct ufs_pa_layer_attr temp_pwr_info;
+ bool pwr_mode_change = false;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
+ orig_pwr_info = hba->pwr_info;
+ temp_pwr_info = orig_pwr_info;
+
+ if (orig_pwr_info.pwr_tx == FAST_MODE ||
+ orig_pwr_info.pwr_rx == FAST_MODE) {
+ temp_pwr_info.pwr_tx = FASTAUTO_MODE;
+ temp_pwr_info.pwr_rx = FASTAUTO_MODE;
+ pwr_mode_change = true;
+ } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
+ orig_pwr_info.pwr_rx == SLOW_MODE) {
+ temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
+ temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
+ pwr_mode_change = true;
+ }
+ if (pwr_mode_change) {
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ if (ret)
+ goto out;
+ }
+ }
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
+ *mib_val = uic_cmd.argument3;
+
+ if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
+ && pwr_mode_change)
+ ufshcd_change_power_mode(hba, &orig_pwr_info);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes a UIC command that affects the link power
+ * state and waits for it to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
+ * and device UniPro link, hence their final completion is indicated by
+ * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to the normal UIC command completion status (UCCS). This function
+ * only returns after the relevant status bits indicate completion.
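+ *
+ * In outline (a summary of the body below, not additional behaviour):
+ *
+ *	disable the UIC_COMMAND_COMPL interrupt if it is enabled
+ *	__ufshcd_send_uic_cmd(hba, cmd, false)
+ *	wait_for_completion_timeout(hba->uic_async_done, UIC_CMD_TIMEOUT)
+ *	check that ufshcd_get_upmcrs(hba) reports PWR_LOCAL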
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
+ unsigned long flags;
+ u8 status;
+ int ret;
+ bool reenable_intr = false;
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ufshcd_is_link_broken(hba)) {
+ ret = -ENOLINK;
+ goto out_unlock;
+ }
+ hba->uic_async_done = &uic_async_done;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
+ }
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->uic_async_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
+
+ if (!cmd->cmd_active) {
+ dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
+ __func__);
+ goto check_upmcrs;
+ }
+
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+check_upmcrs:
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ if (ret) {
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_evt_hist(hba);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ if (ret) {
+ ufshcd_set_link_broken(hba);
+ ufshcd_schedule_eh_work(hba);
+ }
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
+ __func__, ret);
+ goto out;
+ }
+ }
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+out:
+ return ret;
+}
+
+int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Reset the attached device */
+ ufshcd_device_reset(hba);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
+
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
+ struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+ else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
+
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+ ktime_t start = ktime_get();
+
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 exit failed. 
ret = %d\n", + __func__, ret); + } else { + ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, + POST_CHANGE); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); + hba->ufs_stats.hibern8_exit_cnt++; + } + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); + +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + bool update = false; + + if (!ufshcd_is_auto_hibern8_supported(hba)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit != ahit) { + hba->ahit = ahit; + update = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (update && + !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba, false); + ufshcd_auto_hibern8_enable(hba); + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); + } +} +EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) +{ + if (!ufshcd_is_auto_hibern8_supported(hba)) + return; + + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); +} + + /** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info + * @hba: per-adapter instance + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + +/** + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + * @hba: per-adapter instance + */ +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + pwr_info->pwr_tx = FAST_MODE; + pwr_info->pwr_rx = FAST_MODE; + pwr_info->hs_rate = PA_HS_MODE_B; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + + /* + * First, get the maximum gears of HS speed. + * If a zero value, it means there is no HSGEAR capability. + * Then, get the maximum gears of PWM speed. 
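+ * For example, on a PWM-only link PA_MAXRXHSGEAR reads back as zero, so
+ * the code below falls back to PA_MAXRXPWMGEAR and downgrades pwr_rx or
+ * pwr_tx from FAST_MODE to SLOW_MODE.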
+ */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOW_MODE; + } + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOW_MODE; + } + + hba->max_pwr_info.is_valid = true; + return 0; +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (!hba->force_pmc && + pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; + } + + /* + * Configure attributes for power mode change with below. + * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, + * - PA_HSSERIES + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); + + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); + + if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + } + + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 + | pwr_mode->pwr_tx); + + if (ret) { + 
dev_err(hba->dev, + "%s: power mode change failed %d\n", __func__, ret); + } else { + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, + pwr_mode); + + memcpy(&hba->pwr_info, pwr_mode, + sizeof(struct ufs_pa_layer_attr)); + } + + return ret; +} + +/** + * ufshcd_config_pwr_mode - configure a new power mode + * @hba: per-adapter instance + * @desired_pwr_mode: desired power configuration + */ +int ufshcd_config_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *desired_pwr_mode) +{ + struct ufs_pa_layer_attr final_params = { 0 }; + int ret; + + ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, + desired_pwr_mode, &final_params); + + if (ret) + memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); + + ret = ufshcd_change_power_mode(hba, &final_params); + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); + +/** + * ufshcd_complete_dev_init() - checks device readiness + * @hba: per-adapter instance + * + * Set fDeviceInit flag and poll until device toggles it. + */ +static int ufshcd_complete_dev_init(struct ufs_hba *hba) +{ + int err; + bool flag_res = true; + ktime_t timeout; + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL); + if (err) { + dev_err(hba->dev, + "%s setting fDeviceInit flag failed with error %d\n", + __func__, err); + goto out; + } + + /* Poll fDeviceInit flag to be cleared */ + timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT); + do { + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); + if (!flag_res) + break; + usleep_range(500, 1000); + } while (ktime_before(ktime_get(), timeout)); + + if (err) { + dev_err(hba->dev, + "%s reading fDeviceInit flag failed with error %d\n", + __func__, err); + } else if (flag_res) { + dev_err(hba->dev, + "%s fDeviceInit was not cleared by the device\n", + __func__); + err = -EBUSY; + } +out: + return err; +} + +/** + * ufshcd_make_hba_operational - Make UFS controller operational + * @hba: per adapter instance + * + * To bring UFS host controller to operational state, + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base address + * 4. Configure run-stop-registers + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_make_hba_operational(struct ufs_hba *hba) +{ + int err = 0; + u32 reg; + + /* Enable required interrupts */ + ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); + + /* Configure interrupt aggregation */ + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); + else + ufshcd_disable_intr_aggr(hba); + + /* Configure UTRL and UTMRL base address registers */ + ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), + REG_UTP_TRANSFER_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), + REG_UTP_TRANSFER_REQ_LIST_BASE_H); + ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), + REG_UTP_TASK_REQ_LIST_BASE_L); + ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), + REG_UTP_TASK_REQ_LIST_BASE_H); + + /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. 
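+ * (The wmb() below provides that ordering: the run/stop register write
+ * done via ufshcd_enable_run_stop_reg() must not overtake the base
+ * address and interrupt programming above.)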
+ */ + wmb(); + + /* + * UCRDY, UTMRLDY and UTRLRDY bits must be 1 + */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); + if (!(ufshcd_get_lists_status(reg))) { + ufshcd_enable_run_stop_reg(hba); + } else { + dev_err(hba->dev, + "Host controller not ready to process requests"); + err = -EIO; + } + + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); + +/** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + */ +void ufshcd_hba_stop(struct ufs_hba *hba) +{ + unsigned long flags; + int err; + + /* + * Obtain the host lock to prevent that the controller is disabled + * while the UFS interrupt handler is active on another CPU. + */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10, 1); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} +EXPORT_SYMBOL_GPL(ufshcd_hba_stop); + +/** + * ufshcd_hba_execute_hce - initialize the controller + * @hba: per adapter instance + * + * The controller resets itself and controller firmware initialization + * sequence kicks off. When controller is ready it will set + * the Host Controller Enable bit to 1. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_hba_execute_hce(struct ufs_hba *hba) +{ + int retry_outer = 3; + int retry_inner; + +start: + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); + + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + + /* start controller initialization sequence */ + ufshcd_hba_start(hba); + + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
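+ * As used below, hba->vps->hba_enable_delay_us is the per-variant delay
+ * (vendor drivers may override it) and the second argument of
+ * ufshcd_delay_us() is the allowed slack, in microseconds, on top of it.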
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + + /* wait for the host controller to complete initialization */ + retry_inner = 50; + while (!ufshcd_is_hba_active(hba)) { + if (retry_inner) { + retry_inner--; + } else { + dev_err(hba->dev, + "Controller enable failed\n"); + if (retry_outer) { + retry_outer--; + goto start; + } + return -EIO; + } + usleep_range(1000, 1100); + } + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); + + return 0; +} + +int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int ret; + + if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { + ufshcd_set_link_off(hba); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + ret = ufshcd_dme_reset(hba); + if (!ret) { + ret = ufshcd_dme_enable(hba); + if (!ret) + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); + if (ret) + dev_err(hba->dev, + "Host controller enable failed with non-hce\n"); + } + } else { + ret = ufshcd_hba_execute_hce(hba); + } + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_hba_enable); + +static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) +{ + int tx_lanes = 0, i, err = 0; + + if (!peer) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + else + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + for (i = 0; i < tx_lanes; i++) { + if (!peer) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + else + err = ufshcd_dme_peer_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + if (err) { + dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", + __func__, peer, i, err); + break; + } + } + + return err; +} + +static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) +{ + return ufshcd_disable_tx_lcc(hba, true); +} + +void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) +{ + struct ufs_event_hist *e; + + if (id >= UFS_EVT_CNT) + return; + + e = &hba->ufs_stats.event[id]; + e->val[e->pos] = val; + e->tstamp[e->pos] = ktime_get(); + e->cnt += 1; + e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; + + ufshcd_vops_event_notify(hba, id, &val); +} +EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist); + +/** + * ufshcd_link_startup - Initialize unipro link startup + * @hba: per adapter instance + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_link_startup(struct ufs_hba *hba) +{ + int ret; + int retries = DME_LINKSTARTUP_RETRIES; + bool link_startup_again = false; + + /* + * If UFS device isn't active then we will have to issue link startup + * 2 times to make sure the device state move to active. + */ + if (!ufshcd_is_ufs_dev_active(hba)) + link_startup_again = true; + +link_startup: + do { + ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); + + ret = ufshcd_dme_link_startup(hba); + + /* check if device is detected by inter-connect layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + ufshcd_update_evt_hist(hba, + UFS_EVT_LINK_STARTUP_FAIL, + 0); + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. 
+ */ + if (ret && ufshcd_hba_enable(hba)) { + ufshcd_update_evt_hist(hba, + UFS_EVT_LINK_STARTUP_FAIL, + (u32)ret); + goto out; + } + } while (ret && retries--); + + if (ret) { + /* failed to get the link up... retire */ + ufshcd_update_evt_hist(hba, + UFS_EVT_LINK_STARTUP_FAIL, + (u32)ret); + goto out; + } + + if (link_startup_again) { + link_startup_again = false; + retries = DME_LINKSTARTUP_RETRIES; + goto link_startup; + } + + /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ + ufshcd_init_pwr_info(hba); + ufshcd_print_pwr_info(hba); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { + ret = ufshcd_disable_device_tx_lcc(hba); + if (ret) + goto out; + } + + /* Include any host controller configuration via UIC commands */ + ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; + + /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */ + ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + ret = ufshcd_make_hba_operational(hba); +out: + if (ret) { + dev_err(hba->dev, "link startup failed %d\n", ret); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_evt_hist(hba); + } + return ret; +} + +/** + * ufshcd_verify_dev_init() - Verify device initialization + * @hba: per-adapter instance + * + * Send NOP OUT UPIU and wait for NOP IN response to check whether the + * device Transport Protocol (UTP) layer is ready after a reset. + * If the UTP layer at the device side is not initialized, it may + * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT + * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. + */ +static int ufshcd_verify_dev_init(struct ufs_hba *hba) +{ + int err = 0; + int retries; + + ufshcd_hold(hba, false); + mutex_lock(&hba->dev_cmd.lock); + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, + hba->nop_out_timeout); + + if (!err || err == -ETIMEDOUT) + break; + + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); + } + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + + if (err) + dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); + return err; +} + +/** + * ufshcd_set_queue_depth - set lun queue depth + * @sdev: pointer to SCSI device + * + * Read bLUQueueDepth value and activate scsi tagged command + * queueing. For WLUN, queue depth is set to 1. For best-effort + * cases (bLUQueueDepth = 0) the queue depth is set to a maximum + * value that host can queue. 
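+ *
+ * Sketch of the resulting depth (matching the body below):
+ *
+ *	unit descriptor not available (e.g. WLUN) -> 1
+ *	bLUQueueDepth == 0 (best effort)          -> hba->nutrs
+ *	otherwise                                 -> min(bLUQueueDepth, hba->nutrs)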
+ */ +static void ufshcd_set_queue_depth(struct scsi_device *sdev) +{ + int ret = 0; + u8 lun_qdepth; + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + lun_qdepth = hba->nutrs; + ret = ufshcd_read_unit_desc_param(hba, + ufshcd_scsi_to_upiu_lun(sdev->lun), + UNIT_DESC_PARAM_LU_Q_DEPTH, + &lun_qdepth, + sizeof(lun_qdepth)); + + /* Some WLUN doesn't support unit descriptor */ + if (ret == -EOPNOTSUPP) + lun_qdepth = 1; + else if (!lun_qdepth) + /* eventually, we can figure out the real queue depth */ + lun_qdepth = hba->nutrs; + else + lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); + + dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", + __func__, lun_qdepth); + scsi_change_queue_depth(sdev, lun_qdepth); +} + +/* + * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR + * @hba: per-adapter instance + * @lun: UFS device lun id + * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info + * + * Returns 0 in case of success and b_lu_write_protect status would be returned + * @b_lu_write_protect parameter. + * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. + * Returns -EINVAL in case of invalid parameters passed to this function. + */ +static int ufshcd_get_lu_wp(struct ufs_hba *hba, + u8 lun, + u8 *b_lu_write_protect) +{ + int ret; + + if (!b_lu_write_protect) + ret = -EINVAL; + /* + * According to UFS device spec, RPMB LU can't be write + * protected so skip reading bLUWriteProtect parameter for + * it. For other W-LUs, UNIT DESCRIPTOR is not available. + */ + else if (lun >= hba->dev_info.max_lu_supported) + ret = -ENOTSUPP; + else + ret = ufshcd_read_unit_desc_param(hba, + lun, + UNIT_DESC_PARAM_LU_WR_PROTECT, + b_lu_write_protect, + sizeof(*b_lu_write_protect)); + return ret; +} + +/** + * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect + * status + * @hba: per-adapter instance + * @sdev: pointer to SCSI device + * + */ +static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, + struct scsi_device *sdev) +{ + if (hba->dev_info.f_power_on_wp_en && + !hba->dev_info.is_lu_power_on_wp) { + u8 b_lu_write_protect; + + if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), + &b_lu_write_protect) && + (b_lu_write_protect == UFS_LU_POWER_ON_WP)) + hba->dev_info.is_lu_power_on_wp = true; + } +} + +/** + * ufshcd_setup_links - associate link b/w device wlun and other luns + * @sdev: pointer to SCSI device + * @hba: pointer to ufs hba + */ +static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct device_link *link; + + /* + * Device wlun is the supplier & rest of the luns are consumers. + * This ensures that device wlun suspends after all other luns. + */ + if (hba->ufs_device_wlun) { + link = device_link_add(&sdev->sdev_gendev, + &hba->ufs_device_wlun->sdev_gendev, + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); + if (!link) { + dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n", + dev_name(&hba->ufs_device_wlun->sdev_gendev)); + return; + } + hba->luns_avail--; + /* Ignore REPORT_LUN wlun probing */ + if (hba->luns_avail == 1) { + ufshcd_rpm_put(hba); + return; + } + } else { + /* + * Device wlun is probed. The assumption is that WLUNs are + * scanned before other LUNs. 
+ */ + hba->luns_avail--; + } +} + +/** + * ufshcd_slave_alloc - handle initial SCSI device configurations + * @sdev: pointer to SCSI device + * + * Returns success + */ +static int ufshcd_slave_alloc(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ + sdev->use_10_for_ms = 1; + + /* DBD field should be set to 1 in mode sense(10) */ + sdev->set_dbd_for_ms = 1; + + /* allow SCSI layer to restart the device in case of errors */ + sdev->allow_restart = 1; + + /* REPORT SUPPORTED OPERATION CODES is not supported */ + sdev->no_report_opcodes = 1; + + /* WRITE_SAME command is not supported */ + sdev->no_write_same = 1; + + ufshcd_set_queue_depth(sdev); + + ufshcd_get_lu_power_on_wp_status(hba, sdev); + + ufshcd_setup_links(hba, sdev); + + return 0; +} + +/** + * ufshcd_change_queue_depth - change queue depth + * @sdev: pointer to SCSI device + * @depth: required depth to set + * + * Change queue depth and make sure the max. limits are not crossed. + */ +static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) +{ + return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); +} + +static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_destroy_lu(hba, sdev); +} + +static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_init_hpb_lu(hba, sdev); +} + +/** + * ufshcd_slave_configure - adjust SCSI device configurations + * @sdev: pointer to SCSI device + */ +static int ufshcd_slave_configure(struct scsi_device *sdev) +{ + struct ufs_hba *hba = shost_priv(sdev->host); + struct request_queue *q = sdev->request_queue; + + ufshcd_hpb_configure(hba, sdev); + + blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) + blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); + /* + * Block runtime-pm until all consumers are added. + * Refer ufshcd_setup_links(). + */ + if (is_device_wlun(sdev)) + pm_runtime_get_noresume(&sdev->sdev_gendev); + else if (ufshcd_is_rpm_autosuspend_allowed(hba)) + sdev->rpm_autosuspend = 1; + /* + * Do not print messages during runtime PM to avoid never-ending cycles + * of messages written back to storage by user space causing runtime + * resume, causing more messages and so on. 
+ */ + sdev->silence_suspend = 1; + + ufshcd_crypto_register(hba, q); + + return 0; +} + +/** + * ufshcd_slave_destroy - remove SCSI device configurations + * @sdev: pointer to SCSI device + */ +static void ufshcd_slave_destroy(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + unsigned long flags; + + hba = shost_priv(sdev->host); + + ufshcd_hpb_destroy(hba, sdev); + + /* Drop the reference as it won't be needed anymore */ + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->ufs_device_wlun = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } else if (hba->ufs_device_wlun) { + struct device *supplier = NULL; + + /* Ensure UFS Device WLUN exists and does not disappear */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufs_device_wlun) { + supplier = &hba->ufs_device_wlun->sdev_gendev; + get_device(supplier); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (supplier) { + /* + * If a LUN fails to probe (e.g. absent BOOT WLUN), the + * device will not have been registered but can still + * have a device link holding a reference to the device. + */ + device_link_remove(&sdev->sdev_gendev, supplier); + put_device(supplier); + } + } +} + +/** + * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status + * @lrbp: pointer to local reference block of completed command + * @scsi_status: SCSI command status + * + * Returns value base on SCSI command status + */ +static inline int +ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) +{ + int result = 0; + + switch (scsi_status) { + case SAM_STAT_CHECK_CONDITION: + ufshcd_copy_sense_data(lrbp); + fallthrough; + case SAM_STAT_GOOD: + result |= DID_OK << 16 | scsi_status; + break; + case SAM_STAT_TASK_SET_FULL: + case SAM_STAT_BUSY: + case SAM_STAT_TASK_ABORTED: + ufshcd_copy_sense_data(lrbp); + result |= scsi_status; + break; + default: + result |= DID_ERROR << 16; + break; + } /* end of switch */ + + return result; +} + +/** + * ufshcd_transfer_rsp_status - Get overall status of the response + * @hba: per adapter instance + * @lrbp: pointer to local reference block of completed command + * + * Returns result of the command to notify SCSI midlayer + */ +static inline int +ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int result = 0; + int scsi_status; + enum utp_ocs ocs; + + /* overall command status of utrd */ + ocs = ufshcd_get_tr_ocs(lrbp); + + if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { + if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) & + MASK_RSP_UPIU_RESULT) + ocs = OCS_SUCCESS; + } + + switch (ocs) { + case OCS_SUCCESS: + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + /* + * get the response UPIU result to extract + * the SCSI command status + */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + /* + * get the result based on SCSI status response + * to notify the SCSI midlayer of the command status + */ + scsi_status = result & MASK_SCSI_STATUS; + result = ufshcd_scsi_cmd_status(lrbp, scsi_status); + + /* + * Currently we are only supporting BKOPs exception + * events hence we can ignore BKOPs exception event + * during power management callbacks. BKOPs exception + * event is not expected to be raised in runtime suspend + * callback as it allows the urgent bkops. 
+ * During system suspend, we are anyway forcefully + * disabling the bkops and if urgent bkops is needed + * it will be enabled on system resume. Long term + * solution could be to abort the system suspend if + * UFS device needs urgent BKOPs. + */ + if (!hba->pm_op_in_progress && + !ufshcd_eh_in_progress(hba) && + ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + /* Flushed in suspend */ + schedule_work(&hba->eeh_work); + + if (scsi_status == SAM_STAT_GOOD) + ufshpb_rsp_upiu(hba, lrbp); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + result = DID_ERROR << 16; + dev_err(hba->dev, + "Reject UPIU not fully implemented\n"); + break; + default: + dev_err(hba->dev, + "Unexpected request response code = %x\n", + result); + result = DID_ERROR << 16; + break; + } + break; + case OCS_ABORTED: + result |= DID_ABORT << 16; + break; + case OCS_INVALID_COMMAND_STATUS: + result |= DID_REQUEUE << 16; + break; + case OCS_INVALID_CMD_TABLE_ATTR: + case OCS_INVALID_PRDT_ATTR: + case OCS_MISMATCH_DATA_BUF_SIZE: + case OCS_MISMATCH_RESP_UPIU_SIZE: + case OCS_PEER_COMM_FAILURE: + case OCS_FATAL_ERROR: + case OCS_DEVICE_FATAL_ERROR: + case OCS_INVALID_CRYPTO_CONFIG: + case OCS_GENERAL_CRYPTO_ERROR: + default: + result |= DID_ERROR << 16; + dev_err(hba->dev, + "OCS error from controller = %x for tag %d\n", + ocs, lrbp->task_tag); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); + break; + } /* end of switch */ + + if ((host_byte(result) != DID_OK) && + (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) + ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); + return result; +} + +static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, + u32 intr_mask) +{ + if (!ufshcd_is_auto_hibern8_supported(hba) || + !ufshcd_is_auto_hibern8_enabled(hba)) + return false; + + if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) + return false; + + if (hba->active_uic_cmd && + (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || + hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) + return false; + + return true; +} + +/** + * ufshcd_uic_cmd_compl - handle completion of uic command + * @hba: per adapter instance + * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +{ + irqreturn_t retval = IRQ_NONE; + + spin_lock(hba->host->host_lock); + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) + hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); + + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { + hba->active_uic_cmd->argument2 |= + ufshcd_get_uic_cmd_result(hba); + hba->active_uic_cmd->argument3 = + ufshcd_get_dme_attr_val(hba); + if (!hba->uic_async_done) + hba->active_uic_cmd->cmd_active = 0; + complete(&hba->active_uic_cmd->done); + retval = IRQ_HANDLED; + } + + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { + hba->active_uic_cmd->cmd_active = 0; + complete(hba->uic_async_done); + retval = IRQ_HANDLED; + } + + if (retval == IRQ_HANDLED) + ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, + UFS_CMD_COMP); + spin_unlock(hba->host->host_lock); + return retval; +} + +/* Release the resources allocated for processing a SCSI command. */ +static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) +{ + struct scsi_cmnd *cmd = lrbp->cmd; + + scsi_dma_unmap(cmd); + lrbp->cmd = NULL; /* Mark the command as completed. 
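+	 * __ufshcd_transfer_req_compl() below checks this pointer and only
+	 * calls scsi_done() for slots that still carry a SCSI command, so
+	 * clearing it also guards against double completion of the tag.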
*/ + ufshcd_release(hba); + ufshcd_clk_scaling_update_busy(hba); +} + +/** + * __ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + * @completed_reqs: bitmask that indicates which requests to complete + */ +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, + unsigned long completed_reqs) +{ + struct ufshcd_lrb *lrbp; + struct scsi_cmnd *cmd; + int index; + + for_each_set_bit(index, &completed_reqs, hba->nutrs) { + lrbp = &hba->lrb[index]; + lrbp->compl_time_stamp = ktime_get(); + cmd = lrbp->cmd; + if (cmd) { + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) + ufshcd_update_monitor(hba, lrbp); + ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); + cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); + ufshcd_release_scsi_cmd(hba, lrbp); + /* Do not touch lrbp after scsi done */ + scsi_done(cmd); + } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || + lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { + if (hba->dev_cmd.complete) { + ufshcd_add_command_trace(hba, index, + UFS_DEV_COMP); + complete(hba->dev_cmd.complete); + ufshcd_clk_scaling_update_busy(hba); + } + } + } +} + +/* + * Returns > 0 if one or more commands have been completed or 0 if no + * requests have been completed. + */ +static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct ufs_hba *hba = shost_priv(shost); + unsigned long completed_reqs, flags; + u32 tr_doorbell; + + spin_lock_irqsave(&hba->outstanding_lock, flags); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = ~tr_doorbell & hba->outstanding_reqs; + WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, + "completed: %#lx; outstanding: %#lx\n", completed_reqs, + hba->outstanding_reqs); + hba->outstanding_reqs &= ~completed_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (completed_reqs) + __ufshcd_transfer_req_compl(hba, completed_reqs); + + return completed_reqs; +} + +/** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + /* Resetting interrupt aggregation counters first and reading the + * DOOR_BELL afterward allows us to handle all the completed requests. + * To prevent starvation of other interrupts, the DB is read once + * after reset. The downside of this solution is the possibility of + * a false interrupt if the device completes another request after + * resetting aggregation and before reading the DB. + */ + if (ufshcd_is_intr_aggr_allowed(hba) && + !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) + ufshcd_reset_intr_aggr(hba); + + if (ufs_fail_completion()) + return IRQ_HANDLED; + + /* + * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we + * do not want polling to trigger spurious interrupt complaints.
+ */ + ufshcd_poll(hba->host, 0); + + return IRQ_HANDLED; +} + +int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) +{ + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_EE_CONTROL, 0, 0, + &ee_ctrl_mask); +} + +int ufshcd_write_ee_control(struct ufs_hba *hba) +{ + int err; + + mutex_lock(&hba->ee_ctrl_mutex); + err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); + mutex_unlock(&hba->ee_ctrl_mutex); + if (err) + dev_err(hba->dev, "%s: failed to write ee control %d\n", + __func__, err); + return err; +} + +int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask, + u16 set, u16 clr) +{ + u16 new_mask, ee_ctrl_mask; + int err = 0; + + mutex_lock(&hba->ee_ctrl_mutex); + new_mask = (*mask & ~clr) | set; + ee_ctrl_mask = new_mask | *other_mask; + if (ee_ctrl_mask != hba->ee_ctrl_mask) + err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); + /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */ + if (!err) { + hba->ee_ctrl_mask = ee_ctrl_mask; + *mask = new_mask; + } + mutex_unlock(&hba->ee_ctrl_mutex); + return err; +} + +/** + * ufshcd_disable_ee - disable exception event + * @hba: per-adapter instance + * @mask: exception event to disable + * + * Disables exception event in the device so that the EVENT_ALERT + * bit is not set. + * + * Returns zero on success, non-zero error value on failure. + */ +static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) +{ + return ufshcd_update_ee_drv_mask(hba, 0, mask); +} + +/** + * ufshcd_enable_ee - enable exception event + * @hba: per-adapter instance + * @mask: exception event to enable + * + * Enable the corresponding exception event in the device to allow + * the device to alert the host in critical scenarios. + * + * Returns zero on success, non-zero error value on failure. + */ +static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) +{ + return ufshcd_update_ee_drv_mask(hba, mask, 0); +} + +/** + * ufshcd_enable_auto_bkops - Allow device managed BKOPS + * @hba: per-adapter instance + * + * Allow device to manage background operations on its own. Enabling + * this might lead to inconsistent latencies during normal data transfers + * as the device is allowed to manage its own way of handling background + * operations. + * + * Returns zero on success, non-zero on failure. + */ +static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) +{ + int err = 0; + + if (hba->auto_bkops_enabled) + goto out; + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); + if (err) { + dev_err(hba->dev, "%s: failed to enable bkops %d\n", + __func__, err); + goto out; + } + + hba->auto_bkops_enabled = true; + trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); + + /* No need of URGENT_BKOPS exception from the device */ + err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); + if (err) + dev_err(hba->dev, "%s: failed to disable exception event %d\n", + __func__, err); +out: + return err; +} + +/** + * ufshcd_disable_auto_bkops - prevent the device from doing background operations + * @hba: per-adapter instance + * + * Disabling background operations improves command response latency but + * has the drawback of the device moving into a critical state where it is + * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the + * host is idle so that BKOPS are managed effectively without any negative + * impacts. + * + * Returns zero on success, non-zero on failure.
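+ * (Note that if clearing the BKOPS_EN flag fails below after the urgent bkops exception event has been re-enabled, the exception event is disabled again so that the two settings stay consistent.)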
+ */ +static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) +{ + int err = 0; + + if (!hba->auto_bkops_enabled) + goto out; + + /* + * If host assisted BKOPs is to be enabled, make sure + * urgent bkops exception is allowed. + */ + err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); + if (err) { + dev_err(hba->dev, "%s: failed to enable exception event %d\n", + __func__, err); + goto out; + } + + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, + QUERY_FLAG_IDN_BKOPS_EN, 0, NULL); + if (err) { + dev_err(hba->dev, "%s: failed to disable bkops %d\n", + __func__, err); + ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); + goto out; + } + + hba->auto_bkops_enabled = false; + trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); + hba->is_urgent_bkops_lvl_checked = false; +out: + return err; +} + +/** + * ufshcd_force_reset_auto_bkops - force reset auto bkops state + * @hba: per adapter instance + * + * After a device reset the device may toggle the BKOPS_EN flag + * to default value. The s/w tracking variables should be updated + * as well. This function changes the auto-bkops state based on + * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. + */ +static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) +{ + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { + hba->auto_bkops_enabled = false; + hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; + ufshcd_enable_auto_bkops(hba); + } else { + hba->auto_bkops_enabled = true; + hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; + ufshcd_disable_auto_bkops(hba); + } + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; + hba->is_urgent_bkops_lvl_checked = false; +} + +static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) +{ + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); +} + +/** + * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status + * @hba: per-adapter instance + * @status: bkops_status value + * + * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn + * flag in the device to permit background operations if the device + * bkops_status is greater than or equal to the "status" argument passed to + * this function, disable otherwise. + * + * Returns 0 for success, non-zero in case of failure. + * + * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag + * to know whether auto bkops is enabled or disabled after this function + * returns control to it. + */ +static int ufshcd_bkops_ctrl(struct ufs_hba *hba, + enum bkops_status status) +{ + int err; + u32 curr_status = 0; + + err = ufshcd_get_bkops_status(hba, &curr_status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } else if (curr_status > BKOPS_STATUS_MAX) { + dev_err(hba->dev, "%s: invalid BKOPS status %d\n", + __func__, curr_status); + err = -EINVAL; + goto out; + } + + if (curr_status >= status) + err = ufshcd_enable_auto_bkops(hba); + else + err = ufshcd_disable_auto_bkops(hba); +out: + return err; +} + +/** + * ufshcd_urgent_bkops - handle urgent bkops exception event + * @hba: per-adapter instance + * + * Enable fBackgroundOpsEn flag in the device to permit background + * operations. + * + * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative + * error value for any other failure.
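+ * (hba->urgent_bkops_lvl defaults to BKOPS_STATUS_PERF_IMPACT, as set in ufshcd_force_reset_auto_bkops() above, and may be lowered at runtime by ufshcd_bkops_exception_event_handler() below.)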
+ */ +static int ufshcd_urgent_bkops(struct ufs_hba *hba) +{ + return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); +} + +static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) +{ + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); +} + +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) +{ + int err; + u32 curr_status = 0; + + if (hba->is_urgent_bkops_lvl_checked) + goto enable_auto_bkops; + + err = ufshcd_get_bkops_status(hba, &curr_status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } + + /* + * We are seeing that some devices are raising the urgent bkops + * exception events even when BKOPS status doesn't indicate performance + * impacted or critical. Handle these devices by determining their urgent + * bkops status at runtime. + */ + if (curr_status < BKOPS_STATUS_PERF_IMPACT) { + dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", + __func__, curr_status); + /* update the current status as the urgent bkops level */ + hba->urgent_bkops_lvl = curr_status; + hba->is_urgent_bkops_lvl_checked = true; + } + +enable_auto_bkops: + err = ufshcd_enable_auto_bkops(hba); +out: + if (err < 0) + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", + __func__, err); +} + +static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) +{ + u32 value; + + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) + return; + + dev_info(hba->dev, "exception Tcase %d\n", value - 80); + + ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); + + /* + * A placeholder for the platform vendors to add whatever additional + * steps are required + */ +} + +static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) +{ + u8 index; + enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG : + UPIU_QUERY_OPCODE_CLEAR_FLAG; + + index = ufshcd_wb_get_query_index(hba); + return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); +} + +int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) +{ + int ret; + + if (!ufshcd_is_wb_allowed(hba)) + return 0; + + if (!(enable ^ hba->dev_info.wb_enabled)) + return 0; + + ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); + if (ret) { + dev_err(hba->dev, "%s Write Booster %s failed %d\n", + __func__, enable ? "enable" : "disable", ret); + return ret; + } + + hba->dev_info.wb_enabled = enable; + dev_info(hba->dev, "%s Write Booster %s\n", + __func__, enable ? "enabled" : "disabled"); + + return ret; +} + +static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set) +{ + int ret; + + ret = __ufshcd_wb_toggle(hba, set, + QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8); + if (ret) { + dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n", + __func__, set ? "enable" : "disable", ret); + return; + } + dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n", + __func__, set ? "enabled" : "disabled"); +} + +static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable) +{ + int ret; + + if (!ufshcd_is_wb_allowed(hba) || + hba->dev_info.wb_buf_flush_enabled == enable) + return; + + ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); + if (ret) { + dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__, + enable ? 
"enable" : "disable", ret); + return; + } + + hba->dev_info.wb_buf_flush_enabled = enable; + + dev_dbg(hba->dev, "%s WB-Buf Flush %s\n", + __func__, enable ? "enabled" : "disabled"); +} + +static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, + u32 avail_buf) +{ + u32 cur_buf; + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE, + index, 0, &cur_buf); + if (ret) { + dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n", + __func__, ret); + return false; + } + + if (!cur_buf) { + dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", + cur_buf); + return false; + } + /* Let it continue to flush when available buffer exceeds threshold */ + return avail_buf < hba->vps->wb_flush_threshold; +} + +static void ufshcd_wb_force_disable(struct ufs_hba *hba) +{ + if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) + ufshcd_wb_toggle_flush(hba, false); + + ufshcd_wb_toggle_flush_during_h8(hba, false); + ufshcd_wb_toggle(hba, false); + hba->caps &= ~UFSHCD_CAP_WB_EN; + + dev_info(hba->dev, "%s: WB force disabled\n", __func__); +} + +static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) +{ + u32 lifetime; + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST, + index, 0, &lifetime); + if (ret) { + dev_err(hba->dev, + "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n", + __func__, ret); + return false; + } + + if (lifetime == UFS_WB_EXCEED_LIFETIME) { + dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", + __func__, lifetime); + return false; + } + + dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", + __func__, lifetime); + + return true; +} + +static bool ufshcd_wb_need_flush(struct ufs_hba *hba) +{ + int ret; + u32 avail_buf; + u8 index; + + if (!ufshcd_is_wb_allowed(hba)) + return false; + + if (!ufshcd_is_wb_buf_lifetime_available(hba)) { + ufshcd_wb_force_disable(hba); + return false; + } + + /* + * The ufs device needs the vcc to be ON to flush. + * With user-space reduction enabled, it's enough to enable flush + * by checking only the available buffer. The threshold + * defined here is > 90% full. + * With user-space preserved enabled, the current-buffer + * should be checked too because the wb buffer size can reduce + * when disk tends to be full. This info is provided by current + * buffer (dCurrentWriteBoosterBufferSize). There's no point in + * keeping vcc on when current buffer is empty. + */ + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE, + index, 0, &avail_buf); + if (ret) { + dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n", + __func__, ret); + return false; + } + + if (!hba->dev_info.b_presrv_uspc_en) + return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10); + + return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); +} + +static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(to_delayed_work(work), + struct ufs_hba, + rpm_dev_flush_recheck_work); + /* + * To prevent unnecessary VCC power drain after device finishes + * WriteBooster buffer flush or Auto BKOPs, force runtime resume + * after a certain delay to recheck the threshold by next runtime + * suspend. 
+ */ + ufshcd_rpm_get_sync(hba); + ufshcd_rpm_put_sync(hba); +} + +/** + * ufshcd_exception_event_handler - handle exceptions raised by device + * @work: pointer to work data + * + * Read bExceptionEventStatus attribute from the device and handle the + * exception event accordingly. + */ +static void ufshcd_exception_event_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + int err; + u32 status = 0; + hba = container_of(work, struct ufs_hba, eeh_work); + + ufshcd_scsi_block_requests(hba); + err = ufshcd_get_ee_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get exception status %d\n", + __func__, err); + goto out; + } + + trace_ufshcd_exception_event(dev_name(hba->dev), status); + + if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) + ufshcd_bkops_exception_event_handler(hba); + + if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) + ufshcd_temp_exception_event_handler(hba, status); + + ufs_debugfs_exception_event(hba, status); +out: + ufshcd_scsi_unblock_requests(hba); +} + +/* Complete requests that have door-bell cleared */ +static void ufshcd_complete_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); +} + +/** + * ufshcd_quirk_dl_nac_errors - check whether error handling is required + * to recover from the DL NAC errors. + * @hba: per-adapter instance + * + * Returns true if error handling is required, false otherwise + */ +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) +{ + unsigned long flags; + bool err_handling = true; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the + * device fatal error and/or DL NAC & REPLAY timeout errors. + */ + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) + goto out; + + if ((hba->saved_err & DEVICE_FATAL_ERROR) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) + goto out; + + if ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { + int err; + /* + * Wait for 50ms to see if any other errors show up. + */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + msleep(50); + spin_lock_irqsave(hba->host->host_lock, flags); + + /* + * Now check whether we have any severe errors other than + * the DL NAC error. + */ + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) + goto out; + + /* + * As DL NAC is the only error received so far, send out a NOP + * command to confirm whether the link is still active. + * - If we don't get any response then do error recovery. + * - If we get a response then clear the DL NAC error bit.
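+ * (ufshcd_verify_dev_init() below sends the NOP OUT UPIU and waits for the corresponding NOP IN response from the device.)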
+ */ + + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_verify_dev_init(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + + if (err) + goto out; + + /* Link seems to be alive, hence ignore the DL NAC errors */ + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) + hba->saved_err &= ~UIC_ERROR; + /* clear NAC error */ + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + if (!hba->saved_uic_err) + err_handling = false; + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err_handling; +} + +/* host lock must be held before calling this func */ +static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) +{ + return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || + (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); +} + +void ufshcd_schedule_eh_work(struct ufs_hba *hba) +{ + lockdep_assert_held(hba->host->host_lock); + + /* handle fatal errors only when link is not in error state */ + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { + if (hba->force_reset || ufshcd_is_link_broken(hba) || + ufshcd_is_saved_err_fatal(hba)) + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; + else + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; + queue_work(hba->eh_wq, &hba->eh_work); + } +} + +static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) +{ + down_write(&hba->clk_scaling_lock); + hba->clk_scaling.is_allowed = allow; + up_write(&hba->clk_scaling_lock); +} + +static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) +{ + if (suspend) { + if (hba->clk_scaling.is_enabled) + ufshcd_suspend_clkscaling(hba); + ufshcd_clk_scaling_allow(hba, false); + } else { + ufshcd_clk_scaling_allow(hba, true); + if (hba->clk_scaling.is_enabled) + ufshcd_resume_clkscaling(hba); + } +} + +static void ufshcd_err_handling_prepare(struct ufs_hba *hba) +{ + ufshcd_rpm_get_sync(hba); + if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || + hba->is_sys_suspended) { + enum ufs_pm_op pm_op; + + /* + * Don't assume anything about resume: if + * resume fails, irq and clocks can be OFF, and power + * supplies can be OFF or in LPM. + */ + ufshcd_setup_hba_vreg(hba, true); + ufshcd_enable_irq(hba); + ufshcd_setup_vreg(hba, true); + ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + ufshcd_hold(hba, false); + if (!ufshcd_is_clkgating_allowed(hba)) + ufshcd_setup_clocks(hba, true); + ufshcd_release(hba); + pm_op = hba->is_sys_suspended ? 
UFS_SYSTEM_PM : UFS_RUNTIME_PM; + ufshcd_vops_resume(hba, pm_op); + } else { + ufshcd_hold(hba, false); + if (ufshcd_is_clkscaling_supported(hba) && + hba->clk_scaling.is_enabled) + ufshcd_suspend_clkscaling(hba); + ufshcd_clk_scaling_allow(hba, false); + } + ufshcd_scsi_block_requests(hba); + /* Drain ufshcd_queuecommand() */ + synchronize_rcu(); + cancel_work_sync(&hba->eeh_work); +} + +static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) +{ + ufshcd_scsi_unblock_requests(hba); + ufshcd_release(hba); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + ufshcd_rpm_put(hba); +} + +static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) +{ + return (!hba->is_powered || hba->shutting_down || + !hba->ufs_device_wlun || + hba->ufshcd_state == UFSHCD_STATE_ERROR || + (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || + ufshcd_is_link_broken(hba)))); +} + +#ifdef CONFIG_PM +static void ufshcd_recover_pm_error(struct ufs_hba *hba) +{ + struct Scsi_Host *shost = hba->host; + struct scsi_device *sdev; + struct request_queue *q; + int ret; + + hba->is_sys_suspended = false; + /* + * Set RPM status of wlun device to RPM_ACTIVE, + * this also clears its runtime error. + */ + ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + + /* hba device might have a runtime error otherwise */ + if (ret) + ret = pm_runtime_set_active(hba->dev); + /* + * If wlun device had runtime error, we also need to resume those + * consumer scsi devices in case any of them has failed to be + * resumed due to supplier runtime resume failure. This is to unblock + * blk_queue_enter in case there are bios waiting inside it. + */ + if (!ret) { + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + if (q->dev && (q->rpm_status == RPM_SUSPENDED || + q->rpm_status == RPM_SUSPENDING)) + pm_request_resume(q->dev); + } + } +} +#else +static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) +{ +} +#endif + +static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; + u32 mode; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); + + if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) + return true; + + if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) + return true; + + return false; +} + +/** + * ufshcd_err_handler - handle UFS errors that require s/w attention + * @work: pointer to work structure + */ +static void ufshcd_err_handler(struct work_struct *work) +{ + int retries = MAX_ERR_HANDLER_RETRIES; + struct ufs_hba *hba; + unsigned long flags; + bool needs_restore; + bool needs_reset; + bool err_xfer; + bool err_tm; + int pmc_err; + int tag; + + hba = container_of(work, struct ufs_hba, eh_work); + + dev_info(hba->dev, + "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", + __func__, ufshcd_state_name[hba->ufshcd_state], + hba->is_powered, hba->shutting_down, hba->saved_err, + hba->saved_uic_err, hba->force_reset, + ufshcd_is_link_broken(hba) ? 
"; link is broken" : ""); + + down(&hba->host_sem); + spin_lock_irqsave(hba->host->host_lock, flags); + if (ufshcd_err_handling_should_stop(hba)) { + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + up(&hba->host_sem); + return; + } + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_prepare(hba); + /* Complete requests that have door-bell cleared by h/w */ + ufshcd_complete_requests(hba); + spin_lock_irqsave(hba->host->host_lock, flags); +again: + needs_restore = false; + needs_reset = false; + err_xfer = false; + err_tm = false; + + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) + hba->ufshcd_state = UFSHCD_STATE_RESET; + /* + * A full reset and restore might have happened after preparation + * is finished, double check whether we should stop. + */ + if (ufshcd_err_handling_should_stop(hba)) + goto skip_err_handling; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + bool ret; + + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ + ret = ufshcd_quirk_dl_nac_errors(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + if (!ret && ufshcd_err_handling_should_stop(hba)) + goto skip_err_handling; + } + + if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || + (hba->saved_uic_err && + (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { + bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_evt_hist(hba); + ufshcd_print_tmrs(hba, hba->outstanding_tasks); + ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); + spin_lock_irqsave(hba->host->host_lock, flags); + } + + /* + * if host reset is required then skip clearing the pending + * transfers forcefully because they will get cleared during + * host reset and restore + */ + if (hba->force_reset || ufshcd_is_link_broken(hba) || + ufshcd_is_saved_err_fatal(hba) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) { + needs_reset = true; + goto do_reset; + } + + /* + * If LINERESET was caught, UFS might have been put to PWM mode, + * check if power mode restore is needed. + */ + if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { + hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; + if (!hba->saved_uic_err) + hba->saved_err &= ~UIC_ERROR; + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ufshcd_is_pwr_mode_restore_needed(hba)) + needs_restore = true; + spin_lock_irqsave(hba->host->host_lock, flags); + if (!hba->saved_err && !needs_restore) + goto skip_err_handling; + } + + hba->silence_err_logs = true; + /* release lock as clear command might sleep */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + if (ufshcd_try_to_abort_task(hba, tag)) { + err_xfer = true; + goto lock_skip_pending_xfer_clear; + } + dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag, + hba->lrb[tag].cmd ? 
hba->lrb[tag].cmd->cmnd[0] : -1); + } + + /* Clear pending task management requests */ + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { + if (ufshcd_clear_tm_cmd(hba, tag)) { + err_tm = true; + goto lock_skip_pending_xfer_clear; + } + } + +lock_skip_pending_xfer_clear: + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->silence_err_logs = false; + if (err_xfer || err_tm) { + needs_reset = true; + goto do_reset; + } + + /* + * After all reqs and tasks are cleared from doorbell, + * now it is safe to restore power mode. + */ + if (needs_restore) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* + * Hold the scaling lock just in case dev cmds + * are sent via bsg and/or sysfs. + */ + down_write(&hba->clk_scaling_lock); + hba->force_pmc = true; + pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); + if (pmc_err) { + needs_reset = true; + dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", + __func__, pmc_err); + } + hba->force_pmc = false; + ufshcd_print_pwr_info(hba); + up_write(&hba->clk_scaling_lock); + spin_lock_irqsave(hba->host->host_lock, flags); + } + +do_reset: + /* Fatal errors need reset */ + if (needs_reset) { + int err; + + hba->force_reset = false; + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_reset_and_restore(hba); + if (err) + dev_err(hba->dev, "%s: reset and restore failed with err %d\n", + __func__, err); + else + ufshcd_recover_pm_error(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + } + +skip_err_handling: + if (!needs_reset) { + if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + if (hba->saved_err || hba->saved_uic_err) + dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", + __func__, hba->saved_err, hba->saved_uic_err); + } + /* Exit in an operational state or dead */ + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && + hba->ufshcd_state != UFSHCD_STATE_ERROR) { + if (--retries) + goto again; + hba->ufshcd_state = UFSHCD_STATE_ERROR; + } + ufshcd_clear_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_unprepare(hba); + up(&hba->host_sem); + + dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, + ufshcd_state_name[hba->ufshcd_state]); +} + +/** + * ufshcd_update_uic_error - check and set fatal UIC error flags. + * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) +{ + u32 reg; + irqreturn_t retval = IRQ_NONE; + + /* PHY layer error */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && + (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); + /* + * To know whether this error is fatal or not, DB timeout + * must be checked but this error is handled separately. + */ + if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK) + dev_dbg(hba->dev, "%s: UIC Lane error reported\n", + __func__); + + /* Got a LINERESET indication. */ + if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) { + struct uic_command *cmd = NULL; + + hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; + if (hba->uic_async_done && hba->active_uic_cmd) + cmd = hba->active_uic_cmd; + /* + * Ignore the LINERESET during power mode change + * operation via DME_SET command.
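+ * (A LINERESET can be a legitimate part of a power mode change sequence, so in that case it is not treated as an error.)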
+ */ + if (cmd && (cmd->command == UIC_CMD_DME_SET)) + hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; + } + retval |= IRQ_HANDLED; + } + + /* PA_INIT_ERROR is fatal and needs UIC reset */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); + if ((reg & UIC_DATA_LINK_LAYER_ERROR) && + (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); + + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } + retval |= IRQ_HANDLED; + } + + /* UIC NL/TL/DME errors need software retry */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); + if ((reg & UIC_NETWORK_LAYER_ERROR) && + (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); + hba->uic_error |= UFSHCD_UIC_NL_ERROR; + retval |= IRQ_HANDLED; + } + + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); + if ((reg & UIC_TRANSPORT_LAYER_ERROR) && + (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); + hba->uic_error |= UFSHCD_UIC_TL_ERROR; + retval |= IRQ_HANDLED; + } + + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); + if ((reg & UIC_DME_ERROR) && + (reg & UIC_DME_ERROR_CODE_MASK)) { + ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); + hba->uic_error |= UFSHCD_UIC_DME_ERROR; + retval |= IRQ_HANDLED; + } + + dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", + __func__, hba->uic_error); + return retval; +} + +/** + * ufshcd_check_errors - Check for errors that need s/w attention + * @hba: per-adapter instance + * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) +{ + bool queue_eh_work = false; + irqreturn_t retval = IRQ_NONE; + + spin_lock(hba->host->host_lock); + hba->errors |= UFSHCD_ERROR_MASK & intr_status; + + if (hba->errors & INT_FATAL_ERRORS) { + ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, + hba->errors); + queue_eh_work = true; + } + + if (hba->errors & UIC_ERROR) { + hba->uic_error = 0; + retval = ufshcd_update_uic_error(hba); + if (hba->uic_error) + queue_eh_work = true; + } + + if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { + dev_err(hba->dev, + "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n", + __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? + "Enter" : "Exit", + hba->errors, ufshcd_get_upmcrs(hba)); + ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, + hba->errors); + ufshcd_set_link_broken(hba); + queue_eh_work = true; + } + + if (queue_eh_work) { + /* + * Update the transfer error masks to sticky bits; do this + * irrespective of the current ufshcd_state.
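+ * (The saved_err and saved_uic_err bits accumulated here are consumed, and eventually cleared, by ufshcd_err_handler().)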
+ */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + + /* dump controller state before resetting */ + if ((hba->saved_err & + (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || + (hba->saved_uic_err && + (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { + dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", + __func__, hba->saved_err, + hba->saved_uic_err); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, + "host_regs: "); + ufshcd_print_pwr_info(hba); + } + ufshcd_schedule_eh_work(hba); + retval |= IRQ_HANDLED; + } + /* + * if (!queue_eh_work) - + * Other errors are either non-fatal where host recovers + * itself without s/w intervention or errors that will be + * handled by the SCSI core layer. + */ + hba->errors = 0; + hba->uic_error = 0; + spin_unlock(hba->host->host_lock); + return retval; +} + +/** + * ufshcd_tmc_handler - handle task management function completion + * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) +{ + unsigned long flags, pending, issued; + irqreturn_t ret = IRQ_NONE; + int tag; + + spin_lock_irqsave(hba->host->host_lock, flags); + pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); + issued = hba->outstanding_tasks & ~pending; + for_each_set_bit(tag, &issued, hba->nutmrs) { + struct request *req = hba->tmf_rqs[tag]; + struct completion *c = req->end_io_data; + + complete(c); + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +/** + * ufshcd_sl_intr - Interrupt service routine + * @hba: per adapter instance + * @intr_status: contains interrupts generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +{ + irqreturn_t retval = IRQ_NONE; + + if (intr_status & UFSHCD_UIC_MASK) + retval |= ufshcd_uic_cmd_compl(hba, intr_status); + + if (intr_status & UFSHCD_ERROR_MASK || hba->errors) + retval |= ufshcd_check_errors(hba, intr_status); + + if (intr_status & UTP_TASK_REQ_COMPL) + retval |= ufshcd_tmc_handler(hba); + + if (intr_status & UTP_TRANSFER_REQ_COMPL) + retval |= ufshcd_transfer_req_compl(hba); + + return retval; +} + +/** + * ufshcd_intr - Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + u32 intr_status, enabled_intr_status = 0; + irqreturn_t retval = IRQ_NONE; + struct ufs_hba *hba = __hba; + int retries = hba->nutrs; + + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + hba->ufs_stats.last_intr_status = intr_status; + hba->ufs_stats.last_intr_ts = ktime_get(); + + /* + * There can be at most hba->nutrs requests in flight. In the worst + * case, if requests finish one by one after the interrupt status is + * read, make sure we handle them by re-reading the interrupt status + * in a loop until all of them have been processed before returning.
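+ * (retries is initialized to hba->nutrs above, which bounds how many times the interrupt status register needs to be re-read.)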
+ */ + while (intr_status && retries--) { + enabled_intr_status = + intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + if (enabled_intr_status) + retval |= ufshcd_sl_intr(hba, enabled_intr_status); + + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + } + + if (enabled_intr_status && retval == IRQ_NONE && + (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) || + hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) { + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", + __func__, + intr_status, + hba->ufs_stats.last_intr_status, + enabled_intr_status); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + } + + return retval; +} + +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) +{ + int err = 0; + u32 mask = 1 << tag; + unsigned long flags; + + if (!test_bit(tag, &hba->outstanding_tasks)) + goto out; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_utmrl_clear(hba, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* poll for max. 1 sec to clear door bell register by h/w */ + err = ufshcd_wait_for_register(hba, + REG_UTP_TASK_REQ_DOOR_BELL, + mask, 0, 1000, 1000); + + dev_err(hba->dev, "Clearing task management function with tag %d %s\n", + tag, err ? "failed" : "succeeded"); + +out: + return err; +} + +static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, + struct utp_task_req_desc *treq, u8 tm_function) +{ + struct request_queue *q = hba->tmf_queue; + struct Scsi_Host *host = hba->host; + DECLARE_COMPLETION_ONSTACK(wait); + struct request *req; + unsigned long flags; + int task_tag, err; + + /* + * blk_mq_alloc_request() is used here only to get a free tag. + */ + req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) + return PTR_ERR(req); + + req->end_io_data = &wait; + ufshcd_hold(hba, false); + + spin_lock_irqsave(host->host_lock, flags); + + task_tag = req->tag; + WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", + task_tag); + hba->tmf_rqs[req->tag] = req; + treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag); + + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); + + /* send command to the controller */ + __set_bit(task_tag, &hba->outstanding_tasks); + + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + /* Make sure that doorbell is committed immediately */ + wmb(); + + spin_unlock_irqrestore(host->host_lock, flags); + + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); + + /* wait until the task management command is completed */ + err = wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(TM_CMD_TIMEOUT)); + if (!err) { + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); + dev_err(hba->dev, "%s: task management cmd 0x%.2x timed out\n", + __func__, tm_function); + if (ufshcd_clear_tm_cmd(hba, task_tag)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, task_tag); + err = -ETIMEDOUT; + } else { + err = 0; + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); + + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); + } + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->tmf_rqs[req->tag] = NULL; + __clear_bit(task_tag, &hba->outstanding_tasks); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + ufshcd_release(hba); + blk_mq_free_request(req); + + return err; +} + +/** + * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance + * @lun_id: LUN ID to which TM command is sent + * @task_id: task ID to which the TM command is applicable + * @tm_function: task management function opcode + * @tm_response: task management service response return value + * + * Returns non-zero value on error, zero on success. + */ +static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, + u8 tm_function, u8 *tm_response) +{ + struct utp_task_req_desc treq = { { 0 }, }; + enum utp_ocs ocs_value; + int err; + + /* Configure task request descriptor */ + treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); + treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + + /* Configure task request UPIU */ + treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) | + cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24); + treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16); + + /* + * The host shall provide the same value for LUN field in the basic + * header and for Input Parameter. + */ + treq.upiu_req.input_param1 = cpu_to_be32(lun_id); + treq.upiu_req.input_param2 = cpu_to_be32(task_id); + + err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); + if (err == -ETIMEDOUT) + return err; + + ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; + if (ocs_value != OCS_SUCCESS) + dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", + __func__, ocs_value); + else if (tm_response) + *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) & + MASK_TM_SERVICE_RESP; + return err; +} + +/** + * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests + * @hba: per-adapter instance + * @req_upiu: upiu request + * @rsp_upiu: upiu reply + * @desc_buff: pointer to descriptor buffer, NULL if NA + * @buff_len: descriptor size, 0 if NA + * @cmd_type: specifies the type (NOP, Query...) + * @desc_op: descriptor operation + * + * These types of requests use the UTP Transfer Request Descriptor (utrd). + * Therefore, they "ride" the device management infrastructure: they use its + * tag and work queues. + * + * Since there is only one available tag for device management commands, + * the caller is expected to hold the hba->dev_cmd.lock mutex. + */ +static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, + struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, + u8 *desc_buff, int *buff_len, + enum dev_cmd_type cmd_type, + enum query_opcode desc_op) +{ + DECLARE_COMPLETION_ONSTACK(wait); + const u32 tag = hba->reserved_slot; + struct ufshcd_lrb *lrbp; + int err = 0; + u8 upiu_flags; + + /* Protects use of hba->reserved_slot. */ + lockdep_assert_held(&hba->dev_cmd.lock); + + down_read(&hba->clk_scaling_lock); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + lrbp->cmd = NULL; + lrbp->task_tag = tag; + lrbp->lun = 0; + lrbp->intr_cmd = true; + ufshcd_prepare_lrbp_crypto(NULL, lrbp); + hba->dev_cmd.type = cmd_type; + + if (hba->ufs_version <= ufshci_version(1, 1)) + lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; + else + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + + /* update the task tag in the request upiu */ + req_upiu->header.dword_0 |= cpu_to_be32(tag); + + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); + + /* just copy the upiu request as it is */ + memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); + if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { + /* The Data Segment Area is optional depending upon the query + * function value. For WRITE DESCRIPTOR, the data segment + * follows right after the tsf.
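+ * (That is, the descriptor is copied to lrbp->ucd_req_ptr + 1, the memory immediately following the request UPIU; see the memcpy() right below.)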
+ */ + memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); + *buff_len = 0; + } + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + + hba->dev_cmd.complete = &wait; + + ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); + + ufshcd_send_command(hba, tag); + /* + * Ignore the return value here - ufshcd_check_query_response() is + * bound to fail since dev_cmd.query and dev_cmd.type were left empty. + * Read the response directly, ignoring all errors. + */ + ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); + + /* just copy the upiu response as it is */ + memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); + if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); + u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + + if (*buff_len >= resp_len) { + memcpy(desc_buff, descp, resp_len); + *buff_len = resp_len; + } else { + dev_warn(hba->dev, + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, *buff_len); + *buff_len = 0; + err = -EINVAL; + } + } + ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, + (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); + + up_read(&hba->clk_scaling_lock); + return err; +} + +/** + * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands + * @hba: per-adapter instance + * @req_upiu: upiu request + * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands + * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target + * @desc_buff: pointer to descriptor buffer, NULL if NA + * @buff_len: descriptor size, 0 if NA + * @desc_op: descriptor operation + * + * Supports UTP Transfer requests (nop and query), and UTP Task + * Management requests. + * It is up to the caller to fill the UPIU content properly, as it will + * be copied without any further input validations. + */ +int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, + struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, + int msgcode, + u8 *desc_buff, int *buff_len, + enum query_opcode desc_op) +{ + int err; + enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; + struct utp_task_req_desc treq = { { 0 }, }; + enum utp_ocs ocs_value; + u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; + + switch (msgcode) { + case UPIU_TRANSACTION_NOP_OUT: + cmd_type = DEV_CMD_TYPE_NOP; + fallthrough; + case UPIU_TRANSACTION_QUERY_REQ: + ufshcd_hold(hba, false); + mutex_lock(&hba->dev_cmd.lock); + err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, + desc_buff, buff_len, + cmd_type, desc_op); + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + + break; + case UPIU_TRANSACTION_TASK_REQ: + treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); + treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + + memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu)); + + err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); + if (err == -ETIMEDOUT) + break; + + ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; + if (ocs_value != OCS_SUCCESS) { + dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, + ocs_value); + break; + } + + memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu)); + + break; + default: + err = -EINVAL; + + break; + } + + return err; +} + +/** + * ufshcd_eh_device_reset_handler - device reset handler registered to + * scsi layer.
+ * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + u32 pos; + int err; + u8 resp = 0xF, lun; + + host = cmd->device->host; + hba = shost_priv(host); + + lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; + goto out; + } + + /* clear the commands that were pending for corresponding LUN */ + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { + if (hba->lrb[pos].lun == lun) { + err = ufshcd_clear_cmd(hba, pos); + if (err) + break; + __ufshcd_transfer_req_compl(hba, 1U << pos); + } + } + +out: + hba->req_abort_count = 0; + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); + if (!err) { + err = SUCCESS; + } else { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + err = FAILED; + } + return err; +} + +static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) +{ + struct ufshcd_lrb *lrbp; + int tag; + + for_each_set_bit(tag, &bitmap, hba->nutrs) { + lrbp = &hba->lrb[tag]; + lrbp->req_abort_skip = true; + } +} + +/** + * ufshcd_try_to_abort_task - abort a specific task + * @hba: Pointer to adapter instance + * @tag: Task tag/index to be aborted + * + * Abort the pending command in device by sending UFS_ABORT_TASK task management + * command, and in host controller by clearing the door-bell register. There can + * be a race in which the controller sends the command to the device while the + * abort is issued. To avoid that, first issue UFS_QUERY_TASK to check whether + * the command was really issued, and only then try to abort it. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + int err = 0; + int poll_cnt; + u8 resp = 0xF; + u32 reg; + + for (poll_cnt = 100; poll_cnt; poll_cnt--) { + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_QUERY_TASK, &resp); + if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { + /* cmd pending in the device */ + dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", + __func__, tag); + break; + } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + /* + * cmd not pending in the device, check if it is + * in transition. + */ + dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + __func__, tag); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (reg & (1 << tag)) { + /* sleep for max. 200us to stabilize */ + usleep_range(100, 200); + continue; + } + /* command completed already */ + dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", + __func__, tag); + goto out; + } else { + dev_err(hba->dev, + "%s: no response from device. tag = %d, err %d\n", + __func__, tag, err); + if (!err) + err = resp; /* service response error */ + goto out; + } + } + + if (!poll_cnt) { + err = -EBUSY; + goto out; + } + + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_ABORT_TASK, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) { + err = resp; /* service response error */ + dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", + __func__, tag, err); + } + goto out; + } + + err = ufshcd_clear_cmd(hba, tag); + if (err) + dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", + __func__, tag, err); + +out: + return err; +} + +/** + * ufshcd_abort - scsi host template eh_abort_handler callback + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + unsigned long flags; + int err = FAILED; + bool outstanding; + u32 reg; + + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); + + ufshcd_hold(hba, false); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + /* If command is already aborted/completed, return FAILED. */ + if (!(test_bit(tag, &hba->outstanding_reqs))) { + dev_err(hba->dev, + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", + __func__, tag, hba->outstanding_reqs, reg); + goto release; + } + + /* Print Transfer Request of aborted task */ + dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); + + /* + * Print detailed info about aborted request. + * As more than one request might get aborted at the same time, + * print full information only for the first aborted request in order + * to reduce repeated printouts. For other aborted requests only print + * basic details. + */ + scsi_print_command(cmd); + if (!hba->req_abort_count) { + ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); + ufshcd_print_pwr_info(hba); + ufshcd_print_trs(hba, 1 << tag, true); + } else { + ufshcd_print_trs(hba, 1 << tag, false); + } + hba->req_abort_count++; + + if (!(reg & (1 << tag))) { + dev_err(hba->dev, + "%s: cmd was completed, but without a notifying intr, tag = %d", + __func__, tag); + __ufshcd_transfer_req_compl(hba, 1UL << tag); + goto release; + } + + /* + * A task abort to the device W-LUN is illegal. When this command + * fails due to the spec violation, the next step of SCSI error + * handling will be to send a LU reset, which again is a spec violation. + * To avoid these unnecessary/illegal steps, first we clean up + * the lrb taken by this cmd and re-set it in outstanding_reqs, + * then queue the eh_work and bail. + */ + if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { + ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); + + spin_lock_irqsave(host->host_lock, flags); + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); + spin_unlock_irqrestore(host->host_lock, flags); + goto release; + } + + /* Skip task abort in case previous aborts failed and report failure */ + if (lrbp->req_abort_skip) { + dev_err(hba->dev, "%s: skipping abort\n", __func__); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); + goto release; + } + + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); + err = FAILED; + goto release; + } + + /* + * Clear the corresponding bit from outstanding_reqs since the command + * has been aborted successfully.
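+ * (__test_and_clear_bit() below guards against a racing completion: only the context that actually clears the bit releases the command.)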
+ */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (outstanding) + ufshcd_release_scsi_cmd(hba, lrbp); + + err = SUCCESS; + +release: + /* Matches the ufshcd_hold() call at the start of this function. */ + ufshcd_release(hba); + return err; +} + +/** + * ufshcd_host_reset_and_restore - reset and restore host controller + * @hba: per-adapter instance + * + * Note that host controller reset may issue DME_RESET to + * local and remote (device) Uni-Pro stack and the attributes + * are reset to default state. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) +{ + int err; + + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ + ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET); + ufshcd_hba_stop(hba); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; + + /* scale up clocks to max frequency before full reinitialization */ + ufshcd_set_clk_freq(hba, true); + + err = ufshcd_hba_enable(hba); + + /* Establish the link again and restore the device */ + if (!err) + err = ufshcd_probe_hba(hba, false); + + if (err) + dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); + ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); + return err; +} + +/** + * ufshcd_reset_and_restore - reset and re-initialize host/device + * @hba: per-adapter instance + * + * Reset and recover device, host and re-establish link. This + * is helpful to recover communication in fatal error conditions. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_reset_and_restore(struct ufs_hba *hba) +{ + u32 saved_err = 0; + u32 saved_uic_err = 0; + int err = 0; + unsigned long flags; + int retries = MAX_HOST_RESET_RETRIES; + + spin_lock_irqsave(hba->host->host_lock, flags); + do { + /* + * This is a fresh start; cache and clear the saved errors + * first, in case new errors are generated during reset + * and restore. + */ + saved_err |= hba->saved_err; + saved_uic_err |= hba->saved_uic_err; + hba->saved_err = 0; + hba->saved_uic_err = 0; + hba->force_reset = false; + hba->ufshcd_state = UFSHCD_STATE_RESET; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Reset the attached device */ + ufshcd_device_reset(hba); + + err = ufshcd_host_reset_and_restore(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (err) + continue; + /* Do not exit unless operational or dead */ + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && + hba->ufshcd_state != UFSHCD_STATE_ERROR && + hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) + err = -EAGAIN; + } while (err && --retries); + + /* + * Inform the scsi mid-layer that we did reset and allow it to handle + * Unit Attention properly.
+ */ + scsi_report_bus_reset(hba->host, 0); + if (err) { + hba->ufshcd_state = UFSHCD_STATE_ERROR; + hba->saved_err |= saved_err; + hba->saved_uic_err |= saved_uic_err; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int err = SUCCESS; + unsigned long flags; + struct ufs_hba *hba; + + hba = shost_priv(cmd->device->host); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); + dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + flush_work(&hba->eh_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_ERROR) + err = FAILED; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_get_max_icc_level - calculate the ICC level + * @sup_curr_uA: max. current supported by the regulator + * @start_scan: row at the desc table to start scan from + * @buff: power descriptor buffer + * + * Returns calculated max ICC level for specific regulator + */ +static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) +{ + int i; + int curr_uA; + u16 data; + u16 unit; + + for (i = start_scan; i >= 0; i--) { + data = get_unaligned_be16(&buff[2 * i]); + unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> + ATTR_ICC_LVL_UNIT_OFFSET; + curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; + switch (unit) { + case UFSHCD_NANO_AMP: + curr_uA = curr_uA / 1000; + break; + case UFSHCD_MILI_AMP: + curr_uA = curr_uA * 1000; + break; + case UFSHCD_AMP: + curr_uA = curr_uA * 1000 * 1000; + break; + case UFSHCD_MICRO_AMP: + default: + break; + } + if (sup_curr_uA >= curr_uA) + break; + } + if (i < 0) { + i = 0; + pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); + } + + return (u32)i; +} + +/** + * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level + * In case regulators are not initialized we'll return 0 + * @hba: per-adapter instance + * @desc_buf: power descriptor buffer to extract ICC levels from. + * @len: length of desc_buff + * + * Returns calculated ICC level + */ +static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, + u8 *desc_buf, int len) +{ + u32 icc_level = 0; + + if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || + !hba->vreg_info.vccq2) { + /* + * Using dev_dbg to avoid messages during runtime PM to avoid + * never-ending cycles of messages written back to storage by + * user space causing runtime resume, causing more messages and + * so on. 
+ */ + dev_dbg(hba->dev, + "%s: Regulator capability was not set, actvIccLevel=%d", + __func__, icc_level); + goto out; + } + + if (hba->vreg_info.vcc->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vcc->max_uA, + POWER_DESC_MAX_ACTV_ICC_LVLS - 1, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); + + if (hba->vreg_info.vccq->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); + + if (hba->vreg_info.vccq2->max_uA) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq2->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); +out: + return icc_level; +} + +static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) +{ + int ret; + int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; + u8 *desc_buf; + u32 icc_level; + + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return; + + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, + desc_buf, buff_len); + if (ret) { + dev_err(hba->dev, + "%s: Failed reading power descriptor. len = %d ret = %d", + __func__, buff_len, ret); + goto out; + } + + icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, + buff_len); + dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); + + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level); + + if (ret) + dev_err(hba->dev, + "%s: Failed configuring bActiveICCLevel = %d ret = %d", + __func__, icc_level, ret); + +out: + kfree(desc_buf); +} + +static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev) +{ + scsi_autopm_get_device(sdev); + blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev); + if (sdev->rpm_autosuspend) + pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev, + RPM_AUTOSUSPEND_DELAY_MS); + scsi_autopm_put_device(sdev); +} + +/** + * ufshcd_scsi_add_wlus - Adds required W-LUs + * @hba: per-adapter instance + * + * UFS device specification requires the UFS devices to support 4 well known + * logical units: + * "REPORT_LUNS" (address: 01h) + * "UFS Device" (address: 50h) + * "RPMB" (address: 44h) + * "BOOT" (address: 30h) + * UFS device's power management needs to be controlled by the "POWER + * CONDITION" field of the SSU (START STOP UNIT) command. But this "power + * condition" field will take effect only when it's sent to the "UFS device" + * well known logical unit, hence we require the scsi_device instance to + * represent this logical unit in order for the UFS host driver to send the + * SSU command for power management. + * + * We also require the scsi_device instance for the "RPMB" (Replay Protected + * Memory Block) LU so that a user space process can control this LU. User + * space may also want to have access to the BOOT LU. + * + * This function adds scsi device instances for all of the well known LUs + * (except "REPORT LUNS" LU). + * + * Returns zero on success (all required W-LUs are added successfully), + * non-zero error value on failure (if failed to add any of the required W-LU).
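 + * + * (For reference: ufshcd_upiu_wlun_to_scsi_wlun() ORs the W-LUN bit + * (0x80) and SCSI_W_LUN_BASE (0xc100) into the UPIU address, so e.g. + * the "UFS Device" W-LU at 50h is addressed as SCSI LUN 0xc1d0.)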
+ */ +static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) +{ + int ret = 0; + struct scsi_device *sdev_boot, *sdev_rpmb; + + hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); + if (IS_ERR(hba->ufs_device_wlun)) { + ret = PTR_ERR(hba->ufs_device_wlun); + hba->ufs_device_wlun = NULL; + goto out; + } + scsi_device_put(hba->ufs_device_wlun); + + sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); + if (IS_ERR(sdev_rpmb)) { + ret = PTR_ERR(sdev_rpmb); + goto remove_ufs_device_wlun; + } + ufshcd_blk_pm_runtime_init(sdev_rpmb); + scsi_device_put(sdev_rpmb); + + sdev_boot = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); + if (IS_ERR(sdev_boot)) { + dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); + } else { + ufshcd_blk_pm_runtime_init(sdev_boot); + scsi_device_put(sdev_boot); + } + goto out; + +remove_ufs_device_wlun: + scsi_remove_device(hba->ufs_device_wlun); +out: + return ret; +} + +static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u8 lun; + u32 d_lu_wb_buf_alloc; + u32 ext_ufs_feature; + + if (!ufshcd_is_wb_allowed(hba)) + return; + + /* + * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or + * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES + * enabled + */ + if (!(dev_info->wspecversion >= 0x310 || + dev_info->wspecversion == 0x220 || + (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) + goto wb_disabled; + + if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4) + goto wb_disabled; + + ext_ufs_feature = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + + if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP)) + goto wb_disabled; + + /* + * WB may be supported but not configured while provisioning. The spec + * says, in dedicated wb buffer mode, a max of 1 lun would have wb + * buffer configured. 
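 + * The buffer type read below selects between WB_BUF_MODE_SHARED and + * the per-LU dedicated buffer case; both are handled in this function.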
+ */ + dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; + + dev_info->b_presrv_uspc_en = + desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; + + if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) { + if (!get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS)) + goto wb_disabled; + } else { + for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { + d_lu_wb_buf_alloc = 0; + ufshcd_read_unit_desc_param(hba, + lun, + UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS, + (u8 *)&d_lu_wb_buf_alloc, + sizeof(d_lu_wb_buf_alloc)); + if (d_lu_wb_buf_alloc) { + dev_info->wb_dedicated_lu = lun; + break; + } + } + + if (!d_lu_wb_buf_alloc) + goto wb_disabled; + } + + if (!ufshcd_is_wb_buf_lifetime_available(hba)) + goto wb_disabled; + + return; + +wb_disabled: + hba->caps &= ~UFSHCD_CAP_WB_EN; +} + +static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u32 ext_ufs_feature; + u8 mask = 0; + + if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) + return; + + ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + + if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) + mask |= MASK_EE_TOO_LOW_TEMP; + + if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) + mask |= MASK_EE_TOO_HIGH_TEMP; + + if (mask) { + ufshcd_enable_ee(hba, mask); + ufs_hwmon_probe(hba, mask); + } +} + +void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, + const struct ufs_dev_quirk *fixups) +{ + const struct ufs_dev_quirk *f; + struct ufs_dev_info *dev_info = &hba->dev_info; + + if (!fixups) + return; + + for (f = fixups; f->quirk; f++) { + if ((f->wmanufacturerid == dev_info->wmanufacturerid || + f->wmanufacturerid == UFS_ANY_VENDOR) && + ((dev_info->model && + STR_PRFX_EQUAL(f->model, dev_info->model)) || + !strcmp(f->model, UFS_ANY_MODEL))) + hba->dev_quirks |= f->quirk; + } +} +EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks); + +static void ufs_fixup_device_setup(struct ufs_hba *hba) +{ + /* fix by general quirk table */ + ufshcd_fixup_dev_quirks(hba, ufs_fixups); + + /* allow vendors to fix quirks */ + ufshcd_vops_fixup_dev_quirks(hba); +} + +static int ufs_get_device_desc(struct ufs_hba *hba) +{ + int err; + u8 model_index; + u8 b_ufs_feature_sup; + u8 *desc_buf; + struct ufs_dev_info *dev_info = &hba->dev_info; + + desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, + hba->desc_size[QUERY_DESC_IDN_DEVICE]); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. 
err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + /* getting Specification Version in big endian format */ + dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | + desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && + (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { + bool hpb_en = false; + + ufshpb_get_dev_info(hba, desc_buf); + + if (!ufshpb_is_legacy(hba)) + err = ufshcd_query_flag_retry(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_EN, 0, + &hpb_en); + + if (ufshpb_is_legacy(hba) || (!err && hpb_en)) + dev_info->hpb_enabled = true; + } + + err = ufshcd_read_string_desc(hba, model_index, + &dev_info->model, SD_ASCII_STD); + if (err < 0) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + + desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; + + ufs_fixup_device_setup(hba); + + ufshcd_wb_probe(hba, desc_buf); + + ufshcd_temp_notif_probe(hba, desc_buf); + + /* + * ufshcd_read_string_desc() returns the size of the string; reset the + * error value. + */ + err = 0; + +out: + kfree(desc_buf); + return err; +} + +static void ufs_put_device_desc(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + kfree(dev_info->model); + dev_info->model = NULL; +} + +/** + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro + * @hba: per-adapter instance + * + * PA_TActivate parameter can be tuned manually if UniPro version is less than + * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce + * the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_min_activatetime); + if (ret) + goto out; + + /* make sure proper unit conversion is applied */ + tuned_pa_tactivate = + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) + / PA_TACTIVATE_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + tuned_pa_tactivate); + +out: + return ret; +} + +/** + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro + * @hba: per-adapter instance + * + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than + * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's + * TX_HIBERN8TIME_CAPABILITY & the peer M-PHY's RX_HIBERN8TIME_CAPABILITY. + * This optimal value can help reduce the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure.
+ */ +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) +{ + int ret = 0; + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; + u32 max_hibern8_time, tuned_pa_hibern8time; + + ret = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &local_tx_hibern8_time_cap); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_hibern8_time_cap); + if (ret) + goto out; + + max_hibern8_time = max(local_tx_hibern8_time_cap, + peer_rx_hibern8_time_cap); + /* make sure proper unit conversion is applied */ + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) + / PA_HIBERN8_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + tuned_pa_hibern8time); +out: + return ret; +} + +/** + * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is + * less than device PA_TACTIVATE time. + * @hba: per-adapter instance + * + * Some UFS devices require host PA_TACTIVATE to be lower than device + * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk + * for such devices. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 granularity, peer_granularity; + u32 pa_tactivate, peer_pa_tactivate; + u32 pa_tactivate_us, peer_pa_tactivate_us; + u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &granularity); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &peer_granularity); + if (ret) + goto out; + + if ((granularity < PA_GRANULARITY_MIN_VAL) || + (granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", + __func__, granularity); + return -EINVAL; + } + + if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || + (peer_granularity > PA_GRANULARITY_MAX_VAL)) { + dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", + __func__, peer_granularity); + return -EINVAL; + } + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), + &peer_pa_tactivate); + if (ret) + goto out; + + pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; + peer_pa_tactivate_us = peer_pa_tactivate * + gran_to_us_table[peer_granularity - 1]; + + if (pa_tactivate_us >= peer_pa_tactivate_us) { + u32 new_peer_pa_tactivate; + + new_peer_pa_tactivate = pa_tactivate_us / + gran_to_us_table[peer_granularity - 1]; + new_peer_pa_tactivate++; + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + new_peer_pa_tactivate); + } + +out: + return ret; +} + +static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +{ + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { + ufshcd_tune_pa_tactivate(hba); + ufshcd_tune_pa_hibern8time(hba); + } + + ufshcd_vops_apply_dev_quirks(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) + /* set 1ms timeout for PA_TACTIVATE */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) + ufshcd_quirk_tune_host_pa_tactivate(hba); +} + +static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) +{ + hba->ufs_stats.hibern8_exit_cnt = 0; + hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); + hba->req_abort_count = 0; +} + +static int 
ufshcd_device_geo_params_init(struct ufs_hba *hba) +{ + int err; + size_t buff_len; + u8 *desc_buf; + + buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, + desc_buf, buff_len); + if (err) { + dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", + __func__, err); + goto out; + } + + if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) + hba->dev_info.max_lu_supported = 32; + else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) + hba->dev_info.max_lu_supported = 8; + + if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) + ufshpb_get_geo_info(hba, desc_buf); + +out: + kfree(desc_buf); + return err; +} + +struct ufs_ref_clk { + unsigned long freq_hz; + enum ufs_ref_clk_freq val; +}; + +static struct ufs_ref_clk ufs_ref_clk_freqs[] = { + {19200000, REF_CLK_FREQ_19_2_MHZ}, + {26000000, REF_CLK_FREQ_26_MHZ}, + {38400000, REF_CLK_FREQ_38_4_MHZ}, + {52000000, REF_CLK_FREQ_52_MHZ}, + {0, REF_CLK_FREQ_INVAL}, +}; + +static enum ufs_ref_clk_freq +ufs_get_bref_clk_from_hz(unsigned long freq) +{ + int i; + + for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++) + if (ufs_ref_clk_freqs[i].freq_hz == freq) + return ufs_ref_clk_freqs[i].val; + + return REF_CLK_FREQ_INVAL; +} + +void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) +{ + unsigned long freq; + + freq = clk_get_rate(refclk); + + hba->dev_ref_clk_freq = + ufs_get_bref_clk_from_hz(freq); + + if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) + dev_err(hba->dev, + "invalid ref_clk setting = %ld\n", freq); +} + +static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) +{ + int err; + u32 ref_clk; + u32 freq = hba->dev_ref_clk_freq; + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk); + + if (err) { + dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", + err); + goto out; + } + + if (ref_clk == freq) + goto out; /* nothing to update */ + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq); + + if (err) { + dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", + ufs_ref_clk_freqs[freq].freq_hz); + goto out; + } + + dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", + ufs_ref_clk_freqs[freq].freq_hz); + +out: + return err; +} + +static int ufshcd_device_params_init(struct ufs_hba *hba) +{ + bool flag; + int ret, i; + + /* Init device descriptor sizes */ + for (i = 0; i < QUERY_DESC_IDN_MAX; i++) + hba->desc_size[i] = QUERY_DESC_MAX_SIZE; + + /* Init UFS geometry descriptor related parameters */ + ret = ufshcd_device_geo_params_init(hba); + if (ret) + goto out; + + /* Check and apply UFS device quirks */ + ret = ufs_get_device_desc(hba); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", + __func__, ret); + goto out; + } + + ufshcd_get_ref_clk_gating_wait(hba); + + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + /* Probe maximum power mode co-supported by both UFS host and device */ + if (ufshcd_get_max_pwr_mode(hba)) + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); +out: + return ret; +} + +/** + * ufshcd_add_lus - probe and add UFS logical units + * @hba: per-adapter instance + */ +static int ufshcd_add_lus(struct ufs_hba *hba) +{ + int ret; + + /* Add required well known logical units to scsi mid layer */ + ret = ufshcd_scsi_add_wlus(hba); + if (ret) + goto out; + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + hba->clk_scaling.is_allowed = true; + + ret = ufshcd_devfreq_init(hba); + if (ret) + goto out; + + hba->clk_scaling.is_enabled = true; + ufshcd_init_clk_scaling_sysfs(hba); + } + + ufs_bsg_probe(hba); + ufshpb_init(hba); + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + +out: + return ret; +} + +/** + * ufshcd_probe_hba - probe hba to detect device and initialize it + * @hba: per-adapter instance + * @init_dev_params: whether or not to call ufshcd_device_params_init(). + * + * Execute link-startup and verify device initialization + */ +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) +{ + int ret; + unsigned long flags; + ktime_t start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + ret = ufshcd_link_startup(hba); + if (ret) + goto out; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto out; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + ret = ufshcd_verify_dev_init(hba); + if (ret) + goto out; + + /* Initiate UFS initialization, and waiting until completion */ + ret = ufshcd_complete_dev_init(hba); + if (ret) + goto out; + + /* + * Initialize UFS device parameters used by driver, these + * parameters are associated with UFS descriptors. + */ + if (init_dev_params) { + ret = ufshcd_device_params_init(hba); + if (ret) + goto out; + } + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + /* Gear up to HS gear if supported */ + if (hba->max_pwr_info.is_valid) { + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + goto out; + } + ufshcd_print_pwr_info(hba); + } + + /* + * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) + * and for removable UFS card as well, hence always set the parameter. + * Note: Error handler may issue the device reset hence resetting + * bActiveICCLevel as well so it is always safe to set this here. 
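 + * Note: the value programmed is the level index computed by + * ufshcd_find_max_sup_active_icc_level(), not a raw current in uA.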
+ */ + ufshcd_set_active_icc_lvl(hba); + + ufshcd_wb_config(hba); + if (hba->ee_usr_mask) + ufshcd_write_ee_control(hba); + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); + + ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT); +out: + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} + +/** + * ufshcd_async_scan - asynchronous execution for probing hba + * @data: data pointer to pass to this function + * @cookie: cookie data + */ +static void ufshcd_async_scan(void *data, async_cookie_t cookie) +{ + struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; + + down(&hba->host_sem); + /* Initialize hba, detect and initialize UFS device */ + ret = ufshcd_probe_hba(hba, true); + up(&hba->host_sem); + if (ret) + goto out; + + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); +out: + /* + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. + */ + if (ret) { + pm_runtime_put_sync(hba->dev); + ufshcd_hba_exit(hba); + } +} + +static const struct attribute_group *ufshcd_driver_groups[] = { + &ufs_sysfs_unit_descriptor_group, + &ufs_sysfs_lun_attributes_group, +#ifdef CONFIG_SCSI_UFS_HPB + &ufs_sysfs_hpb_stat_group, + &ufs_sysfs_hpb_param_group, +#endif + NULL, +}; + +static struct ufs_hba_variant_params ufs_hba_vps = { + .hba_enable_delay_us = 1000, + .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40), + .devfreq_profile.polling_ms = 100, + .devfreq_profile.target = ufshcd_devfreq_target, + .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status, + .ondemand_data.upthreshold = 70, + .ondemand_data.downdifferential = 5, +}; + +static struct scsi_host_template ufshcd_driver_template = { + .module = THIS_MODULE, + .name = UFSHCD, + .proc_name = UFSHCD, + .map_queues = ufshcd_map_queues, + .queuecommand = ufshcd_queuecommand, + .mq_poll = ufshcd_poll, + .slave_alloc = ufshcd_slave_alloc, + .slave_configure = ufshcd_slave_configure, + .slave_destroy = ufshcd_slave_destroy, + .change_queue_depth = ufshcd_change_queue_depth, + .eh_abort_handler = ufshcd_abort, + .eh_device_reset_handler = ufshcd_eh_device_reset_handler, + .eh_host_reset_handler = ufshcd_eh_host_reset_handler, + .this_id = -1, + .sg_tablesize = SG_ALL, + .cmd_per_lun = UFSHCD_CMD_PER_LUN, + .can_queue = UFSHCD_CAN_QUEUE, + .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, + .max_host_blocked = 1, + .track_queue_depth = 1, + .sdev_groups = ufshcd_driver_groups, + .dma_boundary = PAGE_SIZE - 1, + .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, +}; + +static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, + int ua) +{ + int ret; + + if (!vreg) + return 0; + + /* + * "set_load" operation shall be required on those regulators + * which specifically configured current limitation. Otherwise + * zero max_uA may cause unexpected behavior when regulator is + * enabled or set as high power mode. 
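 + * (vreg->max_uA is populated from the "<name>-max-microamp" device + * tree properties by the platform glue, see ufshcd_populate_vreg(); a + * zero value means no limit was specified there.)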
+ */ + if (!vreg->max_uA) + return 0; + + ret = regulator_set_load(vreg->reg, ua); + if (ret < 0) { + dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", + __func__, vreg->name, ua, ret); + } + + return ret; +} + +static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); +} + +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + if (!vreg) + return 0; + + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); +} + +static int ufshcd_config_vreg(struct device *dev, + struct ufs_vreg *vreg, bool on) +{ + if (regulator_count_voltages(vreg->reg) <= 0) + return 0; + + return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0); +} + +static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || vreg->enabled) + goto out; + + ret = ufshcd_config_vreg(dev, vreg, true); + if (!ret) + ret = regulator_enable(vreg->reg); + + if (!ret) + vreg->enabled = true; + else + dev_err(dev, "%s: %s enable failed, err=%d\n", + __func__, vreg->name, ret); +out: + return ret; +} + +static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || !vreg->enabled || vreg->always_on) + goto out; + + ret = regulator_disable(vreg->reg); + + if (!ret) { + /* ignore errors on applying disable config */ + ufshcd_config_vreg(dev, vreg, false); + vreg->enabled = false; + } else { + dev_err(dev, "%s: %s disable failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + ret = ufshcd_toggle_vreg(dev, info->vcc, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq2, on); + +out: + if (ret) { + ufshcd_toggle_vreg(dev, info->vccq2, false); + ufshcd_toggle_vreg(dev, info->vccq, false); + ufshcd_toggle_vreg(dev, info->vcc, false); + } + return ret; +} + +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); +} + +static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg) + goto out; + + vreg->reg = devm_regulator_get(dev, vreg->name); + if (IS_ERR(vreg->reg)) { + ret = PTR_ERR(vreg->reg); + dev_err(dev, "%s: %s get failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_init_vreg(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + ret = ufshcd_get_vreg(dev, info->vcc); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq); + if (!ret) + ret = ufshcd_get_vreg(dev, info->vccq2); +out: + return ret; +} + +static int ufshcd_init_hba_vreg(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + return ufshcd_get_vreg(hba->dev, info->vdd_hba); +} + +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + unsigned long flags; + ktime_t start = ktime_get(); + bool clk_state_changed = false; + + if (list_empty(head)) + goto out; + + ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); + if (ret) + return ret; + + 
list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + /* + * Don't disable clocks which are needed + * to keep the link active. + */ + if (ufshcd_is_link_active(hba) && + clki->keep_link_active) + continue; + + clk_state_changed = on ^ clki->enabled; + if (on && !clki->enabled) { + ret = clk_prepare_enable(clki->clk); + if (ret) { + dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", + __func__, clki->name, ret); + goto out; + } + } else if (!on && clki->enabled) { + clk_disable_unprepare(clki->clk); + } + clki->enabled = on; + dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, + clki->name, on ? "en" : "dis"); + } + } + + ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); + if (ret) + return ret; + +out: + if (ret) { + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) + clk_disable_unprepare(clki->clk); + } + } else if (!ret && on) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.state = CLKS_ON; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + + if (clk_state_changed) + trace_ufshcd_profile_clk_gating(dev_name(hba->dev), + (on ? "on" : "off"), + ktime_to_us(ktime_sub(ktime_get(), start)), ret); + return ret; +} + +static int ufshcd_init_clocks(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct device *dev = hba->dev; + struct list_head *head = &hba->clk_list_head; + + if (list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!clki->name) + continue; + + clki->clk = devm_clk_get(dev, clki->name); + if (IS_ERR(clki->clk)) { + ret = PTR_ERR(clki->clk); + dev_err(dev, "%s: %s clk get failed, %d\n", + __func__, clki->name, ret); + goto out; + } + + /* + * Parse device ref clk freq as per device tree "ref_clk". + * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL + * in ufshcd_alloc_host(). + */ + if (!strcmp(clki->name, "ref_clk")) + ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); + + if (clki->max_freq) { + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + goto out; + } + clki->curr_freq = clki->max_freq; + } + dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } +out: + return ret; +} + +static int ufshcd_variant_hba_init(struct ufs_hba *hba) +{ + int err = 0; + + if (!hba->vops) + goto out; + + err = ufshcd_vops_init(hba); + if (err) + dev_err(hba->dev, "%s: variant %s init failed err %d\n", + __func__, ufshcd_get_var_name(hba), err); +out: + return err; +} + +static void ufshcd_variant_hba_exit(struct ufs_hba *hba) +{ + if (!hba->vops) + return; + + ufshcd_vops_exit(hba); +} + +static int ufshcd_hba_init(struct ufs_hba *hba) +{ + int err; + + /* + * Handle host controller power separately from the UFS device power + * rails as it will help controlling the UFS host controller power + * collapse easily which is different than UFS device power collapse. + * Also, enable the host controller power before we go ahead with rest + * of the initialization here. 
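 + * + * The bring-up order below is: host rail (vdd_hba), clocks, device + * rails (vcc/vccq/vccq2), then the vendor variant init; the error + * labels unwind in the reverse order.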
+ */ + err = ufshcd_init_hba_vreg(hba); + if (err) + goto out; + + err = ufshcd_setup_hba_vreg(hba, true); + if (err) + goto out; + + err = ufshcd_init_clocks(hba); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_setup_clocks(hba, true); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_init_vreg(hba); + if (err) + goto out_disable_clks; + + err = ufshcd_setup_vreg(hba, true); + if (err) + goto out_disable_clks; + + err = ufshcd_variant_hba_init(hba); + if (err) + goto out_disable_vreg; + + ufs_debugfs_hba_init(hba); + + hba->is_powered = true; + goto out; + +out_disable_vreg: + ufshcd_setup_vreg(hba, false); +out_disable_clks: + ufshcd_setup_clocks(hba, false); +out_disable_hba_vreg: + ufshcd_setup_hba_vreg(hba, false); +out: + return err; +} + +static void ufshcd_hba_exit(struct ufs_hba *hba) +{ + if (hba->is_powered) { + ufshcd_exit_clk_scaling(hba); + ufshcd_exit_clk_gating(hba); + if (hba->eh_wq) + destroy_workqueue(hba->eh_wq); + ufs_debugfs_hba_exit(hba); + ufshcd_variant_hba_exit(hba); + ufshcd_setup_vreg(hba, false); + ufshcd_setup_clocks(hba, false); + ufshcd_setup_hba_vreg(hba, false); + hba->is_powered = false; + ufs_put_device_desc(hba); + } +} + +/** + * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device + * power mode + * @hba: per adapter instance + * @pwr_mode: device power mode to set + * + * Returns 0 if requested power mode is set successfully + * Returns < 0 if failed to set the requested power mode + */ +static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, + enum ufs_dev_pwr_mode pwr_mode) +{ + unsigned char cmd[6] = { START_STOP }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdp; + unsigned long flags; + int ret, retries; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->ufs_device_wlun; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (ret) + return ret; + + /* + * If scsi commands fail, the scsi mid-layer schedules scsi error + * handling, which would wait for the host to be resumed. Since we know + * we are functional while we are here, skip host resume in error + * handling context. + */ + hba->host->eh_noresume = 1; + + cmd[4] = pwr_mode << 4; + + /* + * This function is generally called from the power management + * callbacks, hence set the RQF_PM flag so that it doesn't resume the + * already suspended children.
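 + * (RQF_PM requests are still executed while a SCSI device is + * quiesced, which is what lets this SSU command through on the + * suspend path.)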
+ */ + for (retries = 3; retries > 0; --retries) { + ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); + if (!scsi_status_is_check_condition(ret) || + !scsi_sense_valid(&sshdr) || + sshdr.sense_key != UNIT_ATTENTION) + break; + } + if (ret) { + sdev_printk(KERN_WARNING, sdp, + "START_STOP failed for power mode: %d, result %x\n", + pwr_mode, ret); + if (ret > 0) { + if (scsi_sense_valid(&sshdr)) + scsi_print_sense_hdr(sdp, NULL, &sshdr); + ret = -EIO; + } + } + + if (!ret) + hba->curr_dev_pwr_mode = pwr_mode; + + scsi_device_put(sdp); + hba->host->eh_noresume = 0; + return ret; +} + +static int ufshcd_link_state_transition(struct ufs_hba *hba, + enum uic_link_state req_link_state, + int check_for_bkops) +{ + int ret = 0; + + if (req_link_state == hba->uic_link_state) + return 0; + + if (req_link_state == UIC_LINK_HIBERN8_STATE) { + ret = ufshcd_uic_hibern8_enter(hba); + if (!ret) { + ufshcd_set_link_hibern8(hba); + } else { + dev_err(hba->dev, "%s: hibern8 enter failed %d\n", + __func__, ret); + goto out; + } + } + /* + * If autobkops is enabled, link can't be turned off because + * turning off the link would also turn off the device, except in the + * case of DeepSleep where the device is expected to remain powered. + */ + else if ((req_link_state == UIC_LINK_OFF_STATE) && + (!check_for_bkops || !hba->auto_bkops_enabled)) { + /* + * Let's make sure that the link is in low power mode; we + * currently do this by putting the link in Hibern8. Another + * way to put the link in low power mode is to send a DME end + * point reset to the device and then send the DME reset + * command to the local UniPro. But putting the link in + * Hibern8 is much faster. + * + * Note also that putting the link in Hibern8 is a requirement + * for entering DeepSleep. + */ + ret = ufshcd_uic_hibern8_enter(hba); + if (ret) { + dev_err(hba->dev, "%s: hibern8 enter failed %d\n", + __func__, ret); + goto out; + } + /* + * Change controller state to "reset state" which + * should also put the link in off/reset state + */ + ufshcd_hba_stop(hba); + /* + * TODO: Check if we need any delay to make sure that + * controller is reset + */ + ufshcd_set_link_off(hba); + } + +out: + return ret; +} + +static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) +{ + bool vcc_off = false; + + /* + * It seems some UFS devices may keep drawing more than sleep current + * (at least for 500us) from UFS rails (especially from VCCQ rail). + * To avoid this situation, add 2ms delay before putting these UFS + * rails in LPM mode. + */ + if (!ufshcd_is_link_active(hba) && + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) + usleep_range(2000, 2100); + + /* + * If the UFS device is in UFS_Sleep state, turn off the VCC rail to + * save some power. + * + * If the UFS device and the link are in OFF state, all power supplies + * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is not + * required. If UFS link is inactive (Hibern8 or OFF state) and device + * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. + * + * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway + * in low power state which would save some power. + * + * If Write Booster is enabled and the device needs to flush the WB + * buffer OR if bkops status is urgent for WB, keep Vcc on.
+ */ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ufshcd_setup_vreg(hba, false); + vcc_off = true; + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); + vcc_off = true; + if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); + } + } + + /* + * Some UFS devices require delay after VCC power rail is turned-off. + */ + if (vcc_off && hba->vreg_info.vcc && + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) + usleep_range(5000, 5100); +} + +#ifdef CONFIG_PM +static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ret = ufshcd_setup_vreg(hba, true); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + if (!ufshcd_is_link_active(hba)) { + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + if (ret) + goto vcc_disable; + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + if (ret) + goto vccq_lpm; + } + ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); + } + goto out; + +vccq_lpm: + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); +vcc_disable: + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); +out: + return ret; +} +#endif /* CONFIG_PM */ + +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) + ufshcd_setup_hba_vreg(hba, false); +} + +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) + ufshcd_setup_hba_vreg(hba, true); +} + +static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int ret = 0; + int check_for_bkops; + enum ufs_pm_level pm_lvl; + enum ufs_dev_pwr_mode req_dev_pwr_mode; + enum uic_link_state req_link_state; + + hba->pm_op_in_progress = true; + if (pm_op != UFS_SHUTDOWN_PM) { + pm_lvl = pm_op == UFS_RUNTIME_PM ? + hba->rpm_lvl : hba->spm_lvl; + req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); + req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); + } else { + req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; + req_link_state = UIC_LINK_OFF_STATE; + } + + ufshpb_suspend(hba); + + /* + * If we can't transition into any of the low power modes + * just gate the clocks. + */ + ufshcd_hold(hba, false); + hba->clk_gating.is_suspended = true; + + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, true); + + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && + req_link_state == UIC_LINK_ACTIVE_STATE) { + goto vops_suspend; + } + + if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && + (req_link_state == hba->uic_link_state)) + goto enable_scaling; + + /* UFS device & link must be active before we enter in this function */ + if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { + ret = -EINVAL; + goto enable_scaling; + } + + if (pm_op == UFS_RUNTIME_PM) { + if (ufshcd_can_autobkops_during_suspend(hba)) { + /* + * The device is idle with no requests in the queue, + * allow background operations if bkops status shows + * that performance might be impacted. 
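 + * ufshcd_urgent_bkops() reads the bBackgroundOpStatus attribute and + * enables auto-bkops only at or above hba->urgent_bkops_lvl.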
+ */ + ret = ufshcd_urgent_bkops(hba); + if (ret) + goto enable_scaling; + } else { + /* make sure that auto bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + } + /* + * If device needs to do BKOP or WB buffer flush during + * Hibern8, keep device power mode as "active power mode" + * and VCC supply. + */ + hba->dev_info.b_rpm_dev_flush_capable = + hba->auto_bkops_enabled || + (((req_link_state == UIC_LINK_HIBERN8_STATE) || + ((req_link_state == UIC_LINK_ACTIVE_STATE) && + ufshcd_is_auto_hibern8_enabled(hba))) && + ufshcd_wb_need_flush(hba)); + } + + flush_work(&hba->eeh_work); + + ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); + if (ret) + goto enable_scaling; + + if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { + if (pm_op != UFS_RUNTIME_PM) + /* ensure that bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + + if (!hba->dev_info.b_rpm_dev_flush_capable) { + ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); + if (ret) + goto enable_scaling; + } + } + + /* + * In the case of DeepSleep, the device is expected to remain powered + * with the link off, so do not check for bkops. + */ + check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); + ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); + if (ret) + goto set_dev_active; + +vops_suspend: + /* + * Call vendor specific suspend callback. As these callbacks may access + * vendor specific host controller register space call them before the + * host clocks are ON. + */ + ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); + if (ret) + goto set_link_active; + goto out; + +set_link_active: + /* + * Device hardware reset is required to exit DeepSleep. Also, for + * DeepSleep, the link is off so host reset and restore will be done + * further below. + */ + if (ufshcd_is_ufs_dev_deepsleep(hba)) { + ufshcd_device_reset(hba); + WARN_ON(!ufshcd_is_link_off(hba)); + } + if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) + ufshcd_set_link_active(hba); + else if (ufshcd_is_link_off(hba)) + ufshcd_host_reset_and_restore(hba); +set_dev_active: + /* Can also get here needing to exit DeepSleep */ + if (ufshcd_is_ufs_dev_deepsleep(hba)) { + ufshcd_device_reset(hba); + ufshcd_host_reset_and_restore(hba); + } + if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) + ufshcd_disable_auto_bkops(hba); +enable_scaling: + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + + hba->dev_info.b_rpm_dev_flush_capable = false; +out: + if (hba->dev_info.b_rpm_dev_flush_capable) { + schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, + msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); + } + + if (ret) { + ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); + ufshpb_resume(hba); + } + hba->pm_op_in_progress = false; + return ret; +} + +#ifdef CONFIG_PM +static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int ret; + enum uic_link_state old_link_state = hba->uic_link_state; + + hba->pm_op_in_progress = true; + + /* + * Call vendor specific resume callback. As these callbacks may access + * vendor specific host controller register space call them when the + * host clocks are ON. 
+ */ + ret = ufshcd_vops_resume(hba, pm_op); + if (ret) + goto out; + + /* For DeepSleep, the only supported option is to have the link off */ + WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); + + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (!ret) { + ufshcd_set_link_active(hba); + } else { + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + goto vendor_suspend; + } + } else if (ufshcd_is_link_off(hba)) { + /* + * A full initialization of the host and the device is + * required since the link was put to off during suspend. + * Note, in the case of DeepSleep, the device will exit + * DeepSleep due to device reset. + */ + ret = ufshcd_reset_and_restore(hba); + /* + * ufshcd_reset_and_restore() should have already + * set the link state as active + */ + if (ret || !ufshcd_is_link_active(hba)) + goto vendor_suspend; + } + + if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); + if (ret) + goto set_old_link_state; + } + + if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) + ufshcd_enable_auto_bkops(hba); + else + /* + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. + */ + ufshcd_urgent_bkops(hba); + + if (hba->ee_usr_mask) + ufshcd_write_ee_control(hba); + + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_clk_scaling_suspend(hba, false); + + if (hba->dev_info.b_rpm_dev_flush_capable) { + hba->dev_info.b_rpm_dev_flush_capable = false; + cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); + } + + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); + + ufshpb_resume(hba); + goto out; + +set_old_link_state: + ufshcd_link_state_transition(hba, old_link_state, 0); +vendor_suspend: + ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); + ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); +out: + if (ret) + ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); + hba->pm_op_in_progress = false; + return ret; +} + +static int ufshcd_wl_runtime_suspend(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + + ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); + if (ret) + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); + + trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} + +static int ufshcd_wl_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret = 0; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + + ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); + if (ret) + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); + + trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int ufshcd_wl_suspend(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret = 0; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + down(&hba->host_sem); + + if (pm_runtime_suspended(dev)) + goto out; + + ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); + if (ret) { + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, 
ret); + up(&hba->host_sem); + } + +out: + if (!ret) + hba->is_sys_suspended = true; + trace_ufshcd_wl_suspend(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} + +static int ufshcd_wl_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + int ret = 0; + ktime_t start = ktime_get(); + + hba = shost_priv(sdev->host); + + if (pm_runtime_suspended(dev)) + goto out; + + ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); + if (ret) + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); +out: + trace_ufshcd_wl_resume(dev_name(dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + if (!ret) + hba->is_sys_suspended = false; + up(&hba->host_sem); + return ret; +} +#endif + +static void ufshcd_wl_shutdown(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + down(&hba->host_sem); + hba->shutting_down = true; + up(&hba->host_sem); + + /* Turn on everything while shutting down */ + ufshcd_rpm_get_sync(hba); + scsi_device_quiesce(sdev); + shost_for_each_device(sdev, hba->host) { + if (sdev == hba->ufs_device_wlun) + continue; + scsi_device_quiesce(sdev); + } + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); +} + +/** + * ufshcd_suspend - helper function for suspend operations + * @hba: per adapter instance + * + * This function will disable irqs, turn off clocks + * and set vreg and hba-vreg in lpm mode. + */ +static int ufshcd_suspend(struct ufs_hba *hba) +{ + int ret; + + if (!hba->is_powered) + return 0; + /* + * Disable the host irq as there won't be any + * host controller transaction expected till resume. + */ + ufshcd_disable_irq(hba); + ret = ufshcd_setup_clocks(hba, false); + if (ret) { + ufshcd_enable_irq(hba); + return ret; + } + if (ufshcd_is_clkgating_allowed(hba)) { + hba->clk_gating.state = CLKS_OFF; + trace_ufshcd_clk_gating(dev_name(hba->dev), + hba->clk_gating.state); + } + + ufshcd_vreg_set_lpm(hba); + /* Put the host controller in low power mode if possible */ + ufshcd_hba_vreg_set_lpm(hba); + return ret; +} + +#ifdef CONFIG_PM +/** + * ufshcd_resume - helper function for resume operations + * @hba: per adapter instance + * + * This function basically turns on the regulators, clocks and + * irqs of the hba. + * + * Returns 0 for success and non-zero for failure + */ +static int ufshcd_resume(struct ufs_hba *hba) +{ + int ret; + + if (!hba->is_powered) + return 0; + + ufshcd_hba_vreg_set_hpm(hba); + ret = ufshcd_vreg_set_hpm(hba); + if (ret) + goto out; + + /* Make sure clocks are enabled before accessing controller */ + ret = ufshcd_setup_clocks(hba, true); + if (ret) + goto disable_vreg; + + /* enable the host irq as host controller would be active soon */ + ufshcd_enable_irq(hba); + goto out; + +disable_vreg: + ufshcd_vreg_set_lpm(hba); +out: + if (ret) + ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); + return ret; +} +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +/** + * ufshcd_system_suspend - system suspend callback + * @dev: Device associated with the UFS controller. + * + * Executed before putting the system into a sleep state in which the contents + * of main memory are preserved.
+ * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret = 0; + ktime_t start = ktime_get(); + + if (pm_runtime_suspended(hba->dev)) + goto out; + + ret = ufshcd_suspend(hba); +out: + trace_ufshcd_system_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_system_suspend); + +/** + * ufshcd_system_resume - system resume callback + * @dev: Device associated with the UFS controller. + * + * Executed after waking the system up from a sleep state in which the contents + * of main memory were preserved. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + ktime_t start = ktime_get(); + int ret = 0; + + if (pm_runtime_suspended(hba->dev)) + goto out; + + ret = ufshcd_resume(hba); + +out: + trace_ufshcd_system_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + + return ret; +} +EXPORT_SYMBOL(ufshcd_system_resume); +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +/** + * ufshcd_runtime_suspend - runtime suspend callback + * @dev: Device associated with the UFS controller. + * + * Check the description of ufshcd_suspend() function for more details. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + ktime_t start = ktime_get(); + + ret = ufshcd_suspend(hba); + + trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_runtime_suspend); + +/** + * ufshcd_runtime_resume - runtime resume routine + * @dev: Device associated with the UFS controller. + * + * This function basically brings controller + * to active state. Following operations are done in this function: + * + * 1. Turn on all the controller related clocks + * 2. Turn ON VCC rail + */ +int ufshcd_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + ktime_t start = ktime_get(); + + ret = ufshcd_resume(hba); + + trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); + return ret; +} +EXPORT_SYMBOL(ufshcd_runtime_resume); +#endif /* CONFIG_PM */ + +/** + * ufshcd_shutdown - shutdown routine + * @hba: per adapter instance + * + * This function would turn off both UFS device and UFS hba + * regulators. It would also disable clocks. + * + * Returns 0 always to allow force shutdown even in case of errors. 
+ */ +int ufshcd_shutdown(struct ufs_hba *hba) +{ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) + goto out; + + pm_runtime_get_sync(hba->dev); + + ufshcd_suspend(hba); +out: + hba->is_powered = false; + /* allow force shutdown even in case of errors */ + return 0; +} +EXPORT_SYMBOL(ufshcd_shutdown); + +/** + * ufshcd_remove - de-allocate SCSI host and host memory space + * data structure memory + * @hba: per adapter instance + */ +void ufshcd_remove(struct ufs_hba *hba) +{ + if (hba->ufs_device_wlun) + ufshcd_rpm_get_sync(hba); + ufs_hwmon_remove(hba); + ufs_bsg_remove(hba); + ufshpb_remove(hba); + ufs_sysfs_remove_nodes(hba->dev); + blk_cleanup_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + scsi_remove_host(hba->host); + /* disable interrupts */ + ufshcd_disable_intr(hba, hba->intr_mask); + ufshcd_hba_stop(hba); + ufshcd_hba_exit(hba); +} +EXPORT_SYMBOL_GPL(ufshcd_remove); + +/** + * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) + * @hba: pointer to Host Bus Adapter (HBA) + */ +void ufshcd_dealloc_host(struct ufs_hba *hba) +{ + scsi_host_put(hba->host); +} +EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); + +/** + * ufshcd_set_dma_mask - Set dma mask based on the controller + * addressing capability + * @hba: per adapter instance + * + * Returns 0 for success, non-zero for failure + */ +static int ufshcd_set_dma_mask(struct ufs_hba *hba) +{ + if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { + if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) + return 0; + } + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + +/** + * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) + * @dev: pointer to device handle + * @hba_handle: driver private handle + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + int err = 0; + + if (!dev) { + dev_err(dev, + "Invalid memory reference for dev is NULL\n"); + err = -ENODEV; + goto out_error; + } + + host = scsi_host_alloc(&ufshcd_driver_template, + sizeof(struct ufs_hba)); + if (!host) { + dev_err(dev, "scsi_host_alloc failed\n"); + err = -ENOMEM; + goto out_error; + } + host->nr_maps = HCTX_TYPE_POLL + 1; + hba = shost_priv(host); + hba->host = host; + hba->dev = dev; + hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; + hba->nop_out_timeout = NOP_OUT_TIMEOUT; + INIT_LIST_HEAD(&hba->clk_list_head); + spin_lock_init(&hba->outstanding_lock); + + *hba_handle = hba; + +out_error: + return err; +} +EXPORT_SYMBOL(ufshcd_alloc_host); + +/* This function exists because blk_mq_alloc_tag_set() requires this. */ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + +/** + * ufshcd_init - Driver initialization routine + * @hba: per-adapter instance + * @mmio_base: base register address + * @irq: Interrupt line of device + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) +{ + int err; + struct Scsi_Host *host = hba->host; + struct device *dev = hba->dev; + char eh_wq_name[sizeof("ufs_eh_wq_00")]; + + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). 
+ */ + dev_set_drvdata(dev, hba); + + if (!mmio_base) { + dev_err(hba->dev, + "Invalid memory reference for mmio_base is NULL\n"); + err = -ENODEV; + goto out_error; + } + + hba->mmio_base = mmio_base; + hba->irq = irq; + hba->vps = &ufs_hba_vps; + + err = ufshcd_hba_init(hba); + if (err) + goto out_error; + + /* Read capabilities registers */ + err = ufshcd_hba_capabilities(hba); + if (err) + goto out_disable; + + /* Get UFS version supported by the controller */ + hba->ufs_version = ufshcd_get_ufs_version(hba); + + /* Get Interrupt bit mask per version */ + hba->intr_mask = ufshcd_get_intr_mask(hba); + + err = ufshcd_set_dma_mask(hba); + if (err) { + dev_err(hba->dev, "set dma mask failed\n"); + goto out_disable; + } + + /* Allocate memory for host memory space */ + err = ufshcd_memory_alloc(hba); + if (err) { + dev_err(hba->dev, "Memory allocation failed\n"); + goto out_disable; + } + + /* Configure LRB */ + ufshcd_host_memory_configure(hba); + + host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; + host->max_id = UFSHCD_MAX_ID; + host->max_lun = UFS_MAX_LUNS; + host->max_channel = UFSHCD_MAX_CHANNEL; + host->unique_id = host->host_no; + host->max_cmd_len = UFS_CDB_SIZE; + + hba->max_pwr_info.is_valid = false; + + /* Initialize work queues */ + snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", + hba->host->host_no); + hba->eh_wq = create_singlethread_workqueue(eh_wq_name); + if (!hba->eh_wq) { + dev_err(hba->dev, "%s: failed to create eh workqueue\n", + __func__); + err = -ENOMEM; + goto out_disable; + } + INIT_WORK(&hba->eh_work, ufshcd_err_handler); + INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); + + sema_init(&hba->host_sem, 1); + + /* Initialize UIC command mutex */ + mutex_init(&hba->uic_cmd_mutex); + + /* Initialize mutex for device management commands */ + mutex_init(&hba->dev_cmd.lock); + + /* Initialize mutex for exception event control */ + mutex_init(&hba->ee_ctrl_mutex); + + init_rwsem(&hba->clk_scaling_lock); + + ufshcd_init_clk_gating(hba); + + ufshcd_init_clk_scaling(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + /* + * Make sure that UFS interrupts are disabled and any pending interrupt + * status is cleared before registering UFS interrupt handler. 
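+ * The mb() below provides that ordering guarantee for the two MMIO writes + * above relative to the devm_request_irq() call that follows.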
+ */ + mb(); + + /* IRQ registration */ + err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + if (err) { + dev_err(hba->dev, "request irq failed\n"); + goto out_disable; + } else { + hba->is_irq_enabled = true; + } + + err = scsi_add_host(host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + goto out_disable; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto out_remove_scsi_host; + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + /* Reset the attached device */ + ufshcd_device_reset(hba); + + ufshcd_init_crypto(hba); + + /* Host controller enable */ + err = ufshcd_hba_enable(hba); + if (err) { + dev_err(hba->dev, "Host controller enable failed\n"); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); + goto free_tmf_queue; + } + + /* + * Set the default power management level for runtime and system PM. + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + + INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, + ufshcd_rpm_dev_flush_recheck_work); + + /* Set the default auto-hibernate idle timer value to 150 ms */ + if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { + hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); + } + + /* Hold auto suspend until async scan completes */ + pm_runtime_get_sync(dev); + atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* + * We assume that the device was not put into the sleep/power-down + * state exclusively during the boot stage before the kernel started. + * This assumption helps avoid doing link startup twice during + * ufshcd_probe_hba().
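+ * Hence ufshcd_set_ufs_dev_active() below only updates the driver's + * bookkeeping of the device power state; no SSU command is issued at + * this point.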
+ */ + ufshcd_set_ufs_dev_active(hba); + + async_schedule(ufshcd_async_scan, hba); + ufs_sysfs_add_nodes(hba->dev); + + device_enable_async_suspend(dev); + return 0; + +free_tmf_queue: + blk_cleanup_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +out_remove_scsi_host: + scsi_remove_host(hba->host); +out_disable: + hba->is_irq_enabled = false; + ufshcd_hba_exit(hba); +out_error: + return err; +} +EXPORT_SYMBOL_GPL(ufshcd_init); + +void ufshcd_resume_complete(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (hba->complete_put) { + ufshcd_rpm_put(hba); + hba->complete_put = false; + } +} +EXPORT_SYMBOL_GPL(ufshcd_resume_complete); + +static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) +{ + struct device *dev = &hba->ufs_device_wlun->sdev_gendev; + enum ufs_dev_pwr_mode dev_pwr_mode; + enum uic_link_state link_state; + unsigned long flags; + bool res; + + spin_lock_irqsave(&dev->power.lock, flags); + dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); + link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); + res = pm_runtime_suspended(dev) && + hba->curr_dev_pwr_mode == dev_pwr_mode && + hba->uic_link_state == link_state && + !hba->dev_info.b_rpm_dev_flush_capable; + spin_unlock_irqrestore(&dev->power.lock, flags); + + return res; +} + +int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + + /* + * SCSI assumes that runtime-pm and system-pm for scsi drivers + * are same. And it doesn't wake up the device for system-suspend + * if it's runtime suspended. But ufs doesn't follow that. + * Refer ufshcd_resume_complete() + */ + if (hba->ufs_device_wlun) { + /* Prevent runtime suspend */ + ufshcd_rpm_get_noresume(hba); + /* + * Check if already runtime suspended in same state as system + * suspend would be. + */ + if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { + /* RPM state is not ok for SPM, so runtime resume */ + ret = ufshcd_rpm_resume(hba); + if (ret < 0 && ret != -EACCES) { + ufshcd_rpm_put(hba); + return ret; + } + } + hba->complete_put = true; + } + return 0; +} +EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare); + +int ufshcd_suspend_prepare(struct device *dev) +{ + return __ufshcd_suspend_prepare(dev, true); +} +EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); + +#ifdef CONFIG_PM_SLEEP +static int ufshcd_wl_poweroff(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba = shost_priv(sdev->host); + + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); + return 0; +} +#endif + +static int ufshcd_wl_probe(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!is_device_wlun(sdev)) + return -ENODEV; + + blk_pm_runtime_init(sdev->request_queue, dev); + pm_runtime_set_autosuspend_delay(dev, 0); + pm_runtime_allow(dev); + + return 0; +} + +static int ufshcd_wl_remove(struct device *dev) +{ + pm_runtime_forbid(dev); + return 0; +} + +static const struct dev_pm_ops ufshcd_wl_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ufshcd_wl_suspend, + .resume = ufshcd_wl_resume, + .freeze = ufshcd_wl_suspend, + .thaw = ufshcd_wl_resume, + .poweroff = ufshcd_wl_poweroff, + .restore = ufshcd_wl_resume, +#endif + SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) +}; + +/* + * ufs_dev_wlun_template - describes ufs device wlun + * ufs-device wlun - used to send pm commands + * All luns are consumers of ufs-device wlun. + * + * Currently, no sd driver is present for wluns. 
+ * Hence no specific pm operations are performed. + * By UFS design, SSU (START STOP UNIT) should be sent to the ufs-device wlun. + * Hence register a scsi driver for ufs wluns only. + */ +static struct scsi_driver ufs_dev_wlun_template = { + .gendrv = { + .name = "ufs_device_wlun", + .owner = THIS_MODULE, + .probe = ufshcd_wl_probe, + .remove = ufshcd_wl_remove, + .pm = &ufshcd_wl_pm_ops, + .shutdown = ufshcd_wl_shutdown, + }, +}; + +static int __init ufshcd_core_init(void) +{ + int ret; + + /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */ + static_assert(sizeof(struct utp_transfer_cmd_desc) == + 2 * ALIGNED_UPIU_SIZE + + SG_ALL * sizeof(struct ufshcd_sg_entry)); + + ufs_debugfs_init(); + + ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); + if (ret) + ufs_debugfs_exit(); + return ret; +} + +static void __exit ufshcd_core_exit(void) +{ + ufs_debugfs_exit(); + scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); +} + +module_init(ufshcd_core_init); +module_exit(ufshcd_core_exit); + +MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); +MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("Generic UFS host controller driver Core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c new file mode 100644 index 000000000000..de2bb8401bc4 --- /dev/null +++ b/drivers/ufs/core/ufshpb.c @@ -0,0 +1,2667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee <ymhungry.lee@samsung.com> + * Jinyoung Choi <j-young.choi@samsung.com> + */ + +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/module.h> +#include <scsi/scsi_cmnd.h> + +#include "ufshcd-priv.h" +#include "ufshpb.h" +#include "../../scsi/sd.h" + +#define ACTIVATION_THRESHOLD 8 /* 8 IOs */ +#define READ_TO_MS 1000 +#define READ_TO_EXPIRIES 100 +#define POLLING_INTERVAL_MS 200 +#define THROTTLE_MAP_REQ_DEFAULT 1 + +/* memory management */ +static struct kmem_cache *ufshpb_mctx_cache; +static mempool_t *ufshpb_mctx_pool; +static mempool_t *ufshpb_page_pool; +/* A cache size of 2MB can cache ppn in the 1GB range. */ +static unsigned int ufshpb_host_map_kbytes = 2048; +static int tot_active_srgn_pages; + +static struct workqueue_struct *ufshpb_wq; + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx); + +bool ufshpb_is_allowed(struct ufs_hba *hba) +{ + return !(hba->ufshpb_dev.hpb_disabled); +} + +/* HPB version 1.0 is considered the legacy version.
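+ * Legacy devices also cap the transfer length of a single HPB READ at + * HPB_LEGACY_CHUNK_HIGH; see ufshpb_lu_parameter_init().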
*/ +bool ufshpb_is_legacy(struct ufs_hba *hba) +{ + return hba->ufshpb_dev.is_legacy; +} + +static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) +{ + return sdev->hostdata; +} + +static int ufshpb_get_state(struct ufshpb_lu *hpb) +{ + return atomic_read(&hpb->hpb_state); +} + +static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) +{ + atomic_set(&hpb->hpb_state, state); +} + +static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID; +} + +static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ; +} + +static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd) +{ + return op_is_write(req_op(scsi_cmd_to_rq(cmd))) || + op_is_discard(req_op(scsi_cmd_to_rq(cmd))); +} + +static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) +{ + return transfer_len <= hpb->pre_req_max_tr_len; +} + +static bool ufshpb_is_general_lun(int lun) +{ + return lun < UFS_UPIU_MAX_UNIT_NUM_ID; +} + +static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + return hpb->lu_pinned_end != PINNED_NOT_SET && + rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end; +} + +static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) +{ + bool ret = false; + unsigned long flags; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + return; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = true; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + if (ret) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) +{ + /* Check HPB_UPDATE_ALERT */ + if (!(lrbp->ucd_rsp_ptr->header.dword_2 & + UPIU_HEADER_DWORD(0, 2, 0, 0))) + return false; + + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + rsp_field->hpb_op == HPB_RSP_NONE || + (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE && + !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return false; + + if (!ufshpb_is_general_lun(rsp_field->lun)) { + dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n", + lrbp->lun); + return false; + } + + return true; +} + +static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, + int srgn_offset, int cnt, bool set_dirty) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn, *prev_srgn = NULL; + int set_bit_len; + int bitmap_len; + unsigned long flags; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if ((srgn_offset + cnt) > bitmap_len) + set_bit_len = bitmap_len - srgn_offset; + else + set_bit_len = cnt; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + if (set_dirty) { + if (srgn->srgn_state == HPB_SRGN_VALID) + bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, + set_bit_len); + } else if (hpb->is_hcm) { + /* rewind the read timer for lru regions */ + rgn->read_timeout = ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + 
rgn->hpb->params.read_timeout_expiries; + } + } + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (hpb->is_hcm && prev_srgn != srgn) { + bool activate = false; + + spin_lock(&rgn->rgn_lock); + if (set_dirty) { + rgn->reads -= srgn->reads; + srgn->reads = 0; + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + } else { + srgn->reads++; + rgn->reads++; + if (srgn->reads == hpb->params.activation_thld) + activate = true; + } + spin_unlock(&rgn->rgn_lock); + + if (activate || + test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate region %d-%d\n", rgn_idx, srgn_idx); + } + + prev_srgn = srgn; + } + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= set_bit_len; + if (cnt > 0) + goto next_srgn; +} + +static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int bitmap_len; + int bit_len; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + return true; + + /* + * If the region state is active, mctx must be allocated. + * In this case, check whether the region is evicted or + * mctx allocation fail. + */ + if (unlikely(!srgn->mctx)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return true; + } + + if ((srgn_offset + cnt) > bitmap_len) + bit_len = bitmap_len - srgn_offset; + else + bit_len = cnt; + + if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset, + srgn_offset) < bit_len + srgn_offset) + return true; + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= bit_len; + if (cnt > 0) + goto next_srgn; + + return false; +} + +static inline bool is_rgn_dirty(struct ufshpb_region *rgn) +{ + return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); +} + +static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx, int pos, + int len, __be64 *ppn_buf) +{ + struct page *page; + int index, offset; + int copied; + + index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE); + offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE); + + if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE)) + copied = len; + else + copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset; + + page = mctx->m_page[index]; + if (unlikely(!page)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "error. 
cannot find page in mctx\n"); + return -ENOMEM; + } + + memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE), + copied * HPB_ENTRY_SIZE); + + return copied; +} + +static void +ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, + int *srgn_idx, int *offset) +{ + int rgn_offset; + + *rgn_idx = lpn >> hpb->entries_per_rgn_shift; + rgn_offset = lpn & hpb->entries_per_rgn_mask; + *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; + *offset = rgn_offset & hpb->entries_per_srgn_mask; +} + +static void +ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + __be64 ppn, u8 transfer_len) +{ + unsigned char *cdb = lrbp->cmd->cmnd; + __be64 ppn_tmp = ppn; + cdb[0] = UFSHPB_READ; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ) + ppn_tmp = (__force __be64)swab64((__force u64)ppn); + + /* ppn value is stored as big-endian in the host memory */ + memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); + cdb[14] = transfer_len; + cdb[15] = 0; + + lrbp->cmd->cmd_len = UFS_CDB_SIZE; +} + +/* + * This function will set up HPB read command using host-side L2P map data. + */ +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + struct scsi_cmnd *cmd = lrbp->cmd; + u32 lpn; + __be64 ppn; + unsigned long flags; + int transfer_len, rgn_idx, srgn_idx, srgn_offset; + int err = 0; + + hpb = ufshpb_get_hpb_data(cmd->device); + if (!hpb) + return -ENODEV; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return -ENODEV; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT", __func__); + return -ENODEV; + } + + if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) || + (!ufshpb_is_write_or_discard(cmd) && + !ufshpb_is_read_cmd(cmd))) + return 0; + + transfer_len = sectors_to_logical(cmd->device, + blk_rq_sectors(scsi_cmd_to_rq(cmd))); + if (unlikely(!transfer_len)) + return 0; + + lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd))); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + /* If the command type is WRITE or DISCARD, set the bitmap as dirty */ + if (ufshpb_is_write_or_discard(cmd)) { + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, true); + return 0; + } + + if (!ufshpb_is_supported_chunk(hpb, transfer_len)) + return 0; + + if (hpb->is_hcm) { + /* + * in host control mode, reads are the main source for + * activation trials. + */ + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, false); + + /* keep those counters normalized */ + if (rgn->reads > hpb->entries_per_srgn) + schedule_work(&hpb->ufshpb_normalization_work); + } + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len)) { + hpb->stats.miss_cnt++; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return 0; + } + + err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + if (unlikely(err < 0)) { + /* + * In this case, the region state is active, + * but the ppn table is not allocated. + * The ppn table must always be allocated while the + * region is in the active state. + */ + dev_err(hba->dev, "get ppn failed.
err %d\n", err); + return err; + } + + ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len); + + hpb->stats.hit_cnt++; + return 0; +} + +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, + int rgn_idx, enum req_opf dir, + bool atomic) +{ + struct ufshpb_req *rq; + struct request *req; + int retries = HPB_MAP_REQ_RETRIES; + + rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!rq) + return NULL; + +retry: + req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir, + BLK_MQ_REQ_NOWAIT); + + if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { + usleep_range(3000, 3100); + goto retry; + } + + if (IS_ERR(req)) + goto free_rq; + + rq->hpb = hpb; + rq->req = req; + rq->rb.rgn_idx = rgn_idx; + + return rq; + +free_rq: + kmem_cache_free(hpb->map_req_cache, rq); + return NULL; +} + +static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) +{ + blk_mq_free_request(rq->req); + kmem_cache_free(hpb->map_req_cache, rq); +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct bio *bio; + unsigned long flags; + + if (hpb->is_hcm && + hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "map_req throttle. inflight %d throttle %d", + hpb->num_inflight_map_req, + hpb->params.inflight_map_req); + return NULL; + } + + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false); + if (!map_req) + return NULL; + + bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL); + if (!bio) { + ufshpb_put_req(hpb, map_req); + return NULL; + } + + map_req->bio = bio; + + map_req->rb.srgn_idx = srgn->srgn_idx; + map_req->rb.mctx = srgn->mctx; + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req++; + spin_unlock_irqrestore(&hpb->param_lock, flags); + + return map_req; +} + +static void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + unsigned long flags; + + bio_put(map_req->bio); + ufshpb_put_req(hpb, map_req); + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req--; + spin_unlock_irqrestore(&hpb->param_lock, flags); +} + +static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + u32 num_entries = hpb->entries_per_srgn; + + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return -1; + } + + if (unlikely(srgn->is_last)) + num_entries = hpb->last_srgn_entries; + + bitmap_zero(srgn->mctx->ppn_dirty, num_entries); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + + return 0; +} + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + + hpb->stats.rcmd_active_cnt++; +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + list_del_init(&srgn->list_act_srgn); + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); + + 
hpb->stats.rcmd_inactive_cnt++; +} + +static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + /* + * If there is no mctx in subregion + * after I/O progress for HPB_READ_BUFFER, the region to which the + * subregion belongs was evicted. + * Make sure the region must not evict in I/O progress + */ + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "region %d subregion %d evicted\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + srgn->srgn_state = HPB_SRGN_VALID; +} + +static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + + ufshpb_put_req(umap_req->hpb, umap_req); +} + +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + + map_req->rb.srgn_idx; + + ufshpb_clear_dirty_bitmap(hpb, srgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_activate_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + ufshpb_put_map_req(map_req->hpb, map_req); +} + +static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID : + UFSHPB_WRITE_BUFFER_INACT_ALL_ID; + if (rgn) + put_unaligned_be16(rgn->rgn_idx, &cdb[2]); + cdb[9] = 0x00; +} + +static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + + put_unaligned_be16(rgn_idx, &cdb[2]); + put_unaligned_be16(srgn_idx, &cdb[4]); + put_unaligned_be24(srgn_mem_size, &cdb[6]); + + cdb[9] = 0x00; +} + +static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_req *umap_req, + struct ufshpb_region *rgn) +{ + struct request *req = umap_req->req; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + + req->timeout = 0; + req->end_io_data = umap_req; + req->end_io = ufshpb_umap_req_compl_fn; + + ufshpb_set_unmap_cmd(scmd->cmnd, rgn); + scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(req, true); + + hpb->stats.umap_req_cnt++; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req, bool last) +{ + struct request_queue *q; + struct request *req; + struct scsi_cmnd *scmd; + int mem_size = hpb->srgn_mem_size; + int ret = 0; + int i; + + q = hpb->sdev_ufs_lu->request_queue; + for (i = 0; i < hpb->pages_per_srgn; i++) { + ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i], + PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail %d - %d\n", + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + return ret; + } + } + + req = map_req->req; + + blk_rq_append_bio(req, map_req->bio); + + req->end_io_data = map_req; + req->end_io = ufshpb_map_req_compl_fn; + + if (unlikely(last)) + mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; + + scmd = blk_mq_rq_to_pdu(req); + 
ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx, + map_req->rb.srgn_idx, mem_size); + scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(req, true); + + hpb->stats.map_req_cnt++; + return 0; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, + bool last) +{ + struct ufshpb_map_ctx *mctx; + u32 num_entries = hpb->entries_per_srgn; + int i, j; + + mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL); + if (!mctx) + return NULL; + + mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); + if (!mctx->m_page) + goto release_mctx; + + if (unlikely(last)) + num_entries = hpb->last_srgn_entries; + + mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_m_page; + + for (i = 0; i < hpb->pages_per_srgn; i++) { + mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL); + if (!mctx->m_page[i]) { + for (j = 0; j < i; j++) + mempool_free(mctx->m_page[j], ufshpb_page_pool); + goto release_ppn_dirty; + } + clear_page(page_address(mctx->m_page[i])); + } + + return mctx; + +release_ppn_dirty: + bitmap_free(mctx->ppn_dirty); +release_m_page: + kmem_cache_free(hpb->m_page_cache, mctx->m_page); +release_mctx: + mempool_free(mctx, ufshpb_mctx_pool); + return NULL; +} + +static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + int i; + + for (i = 0; i < hpb->pages_per_srgn; i++) + mempool_free(mctx->m_page[i], ufshpb_page_pool); + + bitmap_free(mctx->ppn_dirty); + kmem_cache_free(hpb->m_page_cache, mctx->m_page); + mempool_free(mctx, ufshpb_mctx_pool); +} + +static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state == HPB_SRGN_ISSUED) + return -EPERM; + + return 0; +} + +static void ufshpb_read_to_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_read_to_work.work); + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + unsigned int poll; + LIST_HEAD(expired_list); + + if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) + return; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) { + bool timedout = ktime_after(ktime_get(), rgn->read_timeout); + + if (timedout) { + rgn->read_timeout_expiries--; + if (is_rgn_dirty(rgn) || + rgn->read_timeout_expiries == 0) + list_add(&rgn->list_expired_rgn, &expired_list); + else + rgn->read_timeout = ktime_add_ms(ktime_get(), + hpb->params.read_timeout_ms); + } + } + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &expired_list, + list_expired_rgn) { + list_del_init(&rgn->list_expired_rgn); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + ufshpb_kick_map_work(hpb); + + clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); +} + +static void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPB_RGN_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic_inc(&lru_info->active_cnt); + if (rgn->hpb->is_hcm) 
{ + rgn->read_timeout = + ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + rgn->hpb->params.read_timeout_expiries; + } +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); +} + +static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *victim_rgn = NULL; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) + continue; + + /* + * in host control mode, verify that the exiting region + * has fewer reads + */ + if (hpb->is_hcm && + rgn->reads > hpb->params.eviction_thld_exit) + continue; + + victim_rgn = rgn; + break; + } + + if (!victim_rgn) + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + + return victim_rgn; +} + +static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPB_RGN_INACTIVE; + atomic_dec(&lru_info->active_cnt); +} + +static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->srgn_state = HPB_SRGN_UNUSED; + srgn->mctx = NULL; + } +} + +static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + bool atomic) +{ + struct ufshpb_req *umap_req; + int rgn_idx = rgn ? rgn->rgn_idx : 0; + + umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic); + if (!umap_req) + return -ENOMEM; + + ufshpb_execute_umap_req(hpb, umap_req, rgn); + + return 0; +} + +static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + return ufshpb_issue_umap_req(hpb, rgn, true); +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + lru_info = &hpb->lru_info; + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + for_each_sub_region(rgn, srgn_idx, srgn) + ufshpb_purge_active_subregion(hpb, srgn); +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state == HPB_RGN_PINNED) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "pinned region cannot drop-out. 
region %d\n", + rgn->rgn_idx); + goto out; + } + + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) { + ret = -EBUSY; + goto out; + } + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + ret = ufshpb_issue_umap_single_req(hpb, rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret; + int err = -EAGAIN; + bool alloc_required = false; + enum HPB_SRGN_STATE state = HPB_SRGN_INVALID; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + goto unlock_out; + } + + if ((rgn->rgn_state == HPB_RGN_INACTIVE) && + (srgn->srgn_state == HPB_SRGN_INVALID)) { + err = 0; + goto unlock_out; + } + + if (srgn->srgn_state == HPB_SRGN_UNUSED) + alloc_required = true; + + /* + * If the subregion is already ISSUED state, + * a specific event (e.g., GC or wear-leveling, etc.) occurs in + * the device and HPB response for map loading is received. + * In this case, after finishing the HPB_READ_BUFFER, + * the next HPB_READ_BUFFER is performed again to obtain the latest + * map data. + */ + if (srgn->srgn_state == HPB_SRGN_ISSUED) + goto unlock_out; + + srgn->srgn_state = HPB_SRGN_ISSUED; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (alloc_required) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "get map_ctx failed. region %d - %d\n", + rgn->rgn_idx, srgn->srgn_idx); + state = HPB_SRGN_UNUSED; + goto change_srgn_state; + } + } + + map_req = ufshpb_get_map_req(hpb, srgn); + if (!map_req) + goto change_srgn_state; + + + ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: issue map_req failed: %d, region %d - %d\n", + __func__, ret, srgn->rgn_idx, srgn->srgn_idx); + goto free_map_req; + } + return 0; + +free_map_req: + ufshpb_put_map_req(hpb, map_req); +change_srgn_state: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + srgn->srgn_state = state; +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return err; +} + +static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn = NULL; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + /* + * If region belongs to lru_list, just move the region + * to the front of lru list because the state of the region + * is already active-state. + */ + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPB_RGN_INACTIVE) { + if (atomic_read(&lru_info->active_cnt) == + lru_info->max_lru_active_cnt) { + /* + * If the maximum number of active regions + * is exceeded, evict the least recently used region. + * This case may occur when the device responds + * to the eviction information late. 
+ * It is okay to evict the least recently used region, + * because the device could detect this region + * by not issuing HPB_READ. + * + * In host control mode, verify that the entering + * region has enough reads. + */ + if (hpb->is_hcm && + rgn->reads < hpb->params.eviction_thld_enter) { + ret = -EACCES; + goto out; + } + + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "cannot get victim region %s\n", + hpb->is_hcm ? "" : "error"); + ret = -ENOMEM; + goto out; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "LRU full (%d), choose victim %d\n", + atomic_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, + flags); + ret = ufshpb_issue_umap_single_req(hpb, + victim_rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, + flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, victim_rgn); + } + + /* + * When a region is added to the lru_info list_head, + * it is guaranteed that the subregion has been + * assigned all mctx. If that failed, try to receive mctx again + * without being added to the lru_info list_head + */ + ufshpb_add_lru_info(lru_info, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +/** + * ufshpb_submit_region_inactive() - submit a region to be inactivated later + * @hpb: per-LU HPB instance + * @region_index: the index associated with the region that will be inactivated later + */ +static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index) +{ + int subregion_index; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + /* + * Remove this region from the active region list and add it to the inactive list + */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, region_index); + spin_unlock(&hpb->rsp_list_lock); + + rgn = hpb->rgn_tbl + region_index; + + /* + * Set the subregion state to HPB_SRGN_INVALID so that there will be no HPB read on this subregion + */ + spin_lock(&hpb->rgn_state_lock); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) { + srgn = rgn->srgn_tbl + subregion_index; + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + } + } + spin_unlock(&hpb->rgn_state_lock); +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct utp_hpb_rsp *rsp_field) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int i, rgn_i, srgn_i; + + BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE); + /* + * If the active region and the inactive region are the same, + * we will inactivate this region. + * The device could check this (region inactivated) and + * will respond with the proper active region information. + */ + for (i = 0; i < rsp_field->active_rgn_cnt; i++) { + rgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn); + srgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + + rgn = hpb->rgn_tbl + rgn_i; + if (hpb->is_hcm && + (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { + /* + * in host control mode, subregion activation + * recommendations are only allowed for active regions.
+ * Also, ignore recommendations for dirty regions - the + * host will make decisions concerning those by itself + */ + continue; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate(%d) region %d - %d\n", i, rgn_i, srgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_active_info(hpb, rgn_i, srgn_i); + spin_unlock(&hpb->rsp_list_lock); + + srgn = rgn->srgn_tbl + srgn_i; + + /* blocking HPB_READ */ + spin_lock(&hpb->rgn_state_lock); + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + spin_unlock(&hpb->rgn_state_lock); + } + + if (hpb->is_hcm) { + /* + * In host control mode the device is not allowed to inactivate + * regions. + */ + goto out; + } + + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { + rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i); + ufshpb_submit_region_inactive(hpb, rgn_i); + } + +out: + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); +} + +/* + * Set the flags of all active regions to RGN_FLAG_UPDATE to make the host side reload the L2P entries later + */ +static void ufshpb_set_regions_update(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + unsigned long flags; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + +static void ufshpb_dev_reset_handler(struct ufs_hba *hba) +{ + struct scsi_device *sdev; + struct ufshpb_lu *hpb; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (hpb->is_hcm) { + /* + * For the HPB host control mode, in case the device powered up and lost its + * HPB information, we set the region flag to RGN_FLAG_UPDATE; this makes + * the host reload its L2P entries (reactivating the regions in the UFS device). + */ + ufshpb_set_regions_update(hpb); + } else { + /* + * For the HPB device control mode, receiving 02h:HPB Operation in a UPIU + * response means the device recommends that the host inactivate all active + * regions. Here we add all active regions to the inactive list; they will + * be inactivated later in ufshpb_map_work_handler(). + */ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + ufshpb_submit_region_inactive(hpb, rgn->rgn_idx); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); + } + } +} + +/* + * This function parses the recommended active subregion information in the sense + * data field of a response UPIU with SAM_STAT_GOOD status.
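 + * The descriptor is validated by ufshpb_is_hpb_rsp_valid() before any + * region state is updated; responses for other LUNs or with malformed + * fields are ignored.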
+ */ +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); + struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr; + int data_seg_len; + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + /* If data segment length is zero, rsp_field is not valid */ + if (!data_seg_len) + return; + + if (unlikely(lrbp->lun != rsp_field->lun)) { + struct scsi_device *sdev; + bool found = false; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + continue; + + if (rsp_field->lun == hpb->lun) { + found = true; + break; + } + } + + if (!found) + return; + } + + if (!hpb) + return; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT/SUSPEND\n", + __func__); + return; + } + + BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE); + + if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field)) + return; + + hpb->stats.rcmd_noti_cnt++; + + switch (rsp_field->hpb_op) { + case HPB_RSP_REQ_REGION_UPDATE: + if (data_seg_len != DEV_DATA_SEG_LEN) + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "%s: data seg length is not same.\n", + __func__); + ufshpb_rsp_req_region_update(hpb, rsp_field); + break; + case HPB_RSP_DEV_RESET: + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "UFS device lost HPB information during PM.\n"); + ufshpb_dev_reset_handler(hba); + + break; + default: + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "hpb_op is not available: %d\n", + rsp_field->hpb_op); + break; + } +} + +static void ufshpb_add_active_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *pending_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (!list_empty(&srgn->list_act_srgn)) + return; + + list_add_tail(&rgn->list_inact_rgn, pending_list); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + ret = ufshpb_add_region(hpb, rgn); + if (ret) + goto active_failed; + + ret = ufshpb_issue_map_req(hpb, rgn, srgn); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "issue map_req failed. 
ret %d, region %d - %d\n", + ret, rgn->rgn_idx, srgn->srgn_idx); + goto active_failed; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + return; + +active_failed: + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", + rgn->rgn_idx, srgn->srgn_idx); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(pending_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&pending_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_normalization_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_normalization_work); + int rgn_idx; + u8 factor = hpb->params.normalization_factor; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; + int srgn_idx; + + spin_lock(&rgn->rgn_lock); + rgn->reads = 0; + for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { + struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; + + srgn->reads >>= factor; + rgn->reads += srgn->reads; + } + spin_unlock(&rgn->rgn_lock); + + if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads) + continue; + + /* if a region is active but has no reads - inactivate it */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock(&hpb->rsp_list_lock); + } +} + +static void ufshpb_map_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); +} + +/* + * This function does not need to hold any locks (rgn_state_lock, + * rsp_list_lock, etc.) because it is only called during initialization.
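 + * At init time the logical unit has not been exposed for I/O yet, so no + * other context can touch the region tables concurrently.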
+ */ +static int ufshpb_init_pinned_active_region(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, i; + int err = 0; + + for_each_sub_region(rgn, srgn_idx, srgn) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + srgn->srgn_state = HPB_SRGN_INVALID; + if (!srgn->mctx) { + err = -ENOMEM; + dev_err(hba->dev, + "alloc mctx for pinned region failed\n"); + goto release; + } + + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + } + + rgn->rgn_state = HPB_RGN_PINNED; + return 0; + +release: + for (i = 0; i < srgn_idx; i++) { + srgn = rgn->srgn_tbl + i; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + return err; +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, bool last) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) { + INIT_LIST_HEAD(&srgn->list_act_srgn); + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } + + if (unlikely(last && hpb->last_srgn_entries)) + srgn->is_last = true; +} + +static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, int srgn_cnt) +{ + rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), + GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static void ufshpb_lu_parameter_init(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + u32 entries_per_rgn; + u64 rgn_mem_size, tmp; + + if (ufshpb_is_legacy(hba)) + hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; + else + hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd; + + hpb->lu_pinned_start = hpb_lu_info->pinned_start; + hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
+ (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) + : PINNED_NOT_SET; + hpb->lru_info.max_lru_active_cnt = + hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned; + + rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT + * HPB_ENTRY_SIZE; + do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE); + hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) + * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE; + + tmp = rgn_mem_size; + do_div(tmp, HPB_ENTRY_SIZE); + entries_per_rgn = (u32)tmp; + hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + tmp = rgn_mem_size; + do_div(tmp, hpb->srgn_mem_size); + hpb->srgns_per_rgn = (int)tmp; + + hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + entries_per_rgn); + hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); + hpb->last_srgn_entries = hpb_lu_info->num_blocks + % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); + + hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); + + if (hpb_dev_info->control_mode == HPB_HOST_CONTROL) + hpb->is_hcm = true; +} + +static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn_table, *rgn; + int rgn_idx, i; + int ret = 0; + + rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), + GFP_KERNEL); + if (!rgn_table) + return -ENOMEM; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + int srgn_cnt = hpb->srgns_per_rgn; + bool last_srgn = false; + + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + spin_lock_init(&rgn->rgn_lock); + + INIT_LIST_HEAD(&rgn->list_inact_rgn); + INIT_LIST_HEAD(&rgn->list_lru_rgn); + INIT_LIST_HEAD(&rgn->list_expired_rgn); + + if (rgn_idx == hpb->rgns_per_lu - 1) { + srgn_cnt = ((hpb->srgns_per_lu - 1) % + hpb->srgns_per_rgn) + 1; + last_srgn = true; + } + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if (ret) + goto release_srgn_table; + ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); + + if (ufshpb_is_pinned_region(hpb, rgn_idx)) { + ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); + if (ret) + goto release_srgn_table; + } else { + rgn->rgn_state = HPB_RGN_INACTIVE; + } + + rgn->rgn_flags = 0; + rgn->hpb = hpb; + } + + hpb->rgn_tbl = rgn_table; + + return 0; + +release_srgn_table: + for (i = 0; i <= rgn_idx; i++) + kvfree(rgn_table[i].srgn_tbl); + + kvfree(rgn_table); + return ret; +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + srgn->srgn_state = HPB_SRGN_UNUSED; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + rgn->rgn_state = HPB_RGN_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + + kvfree(rgn->srgn_tbl); + } + + kvfree(hpb->rgn_tbl); +} + +/* SYSFS functions */ +#define ufshpb_sysfs_attr_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct 
scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \ +} \ +\ +static DEVICE_ATTR_RO(__name) + +ufshpb_sysfs_attr_show_func(hit_cnt); +ufshpb_sysfs_attr_show_func(miss_cnt); +ufshpb_sysfs_attr_show_func(rcmd_noti_cnt); +ufshpb_sysfs_attr_show_func(rcmd_active_cnt); +ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt); +ufshpb_sysfs_attr_show_func(map_req_cnt); +ufshpb_sysfs_attr_show_func(umap_req_cnt); + +static struct attribute *hpb_dev_stat_attrs[] = { + &dev_attr_hit_cnt.attr, + &dev_attr_miss_cnt.attr, + &dev_attr_rcmd_noti_cnt.attr, + &dev_attr_rcmd_active_cnt.attr, + &dev_attr_rcmd_inactive_cnt.attr, + &dev_attr_map_req_cnt.attr, + &dev_attr_umap_req_cnt.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_stat_group = { + .name = "hpb_stats", + .attrs = hpb_dev_stat_attrs, +}; + +/* SYSFS functions */ +#define ufshpb_sysfs_param_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%d\n", hpb->params.__name); \ +} + +ufshpb_sysfs_param_show_func(requeue_timeout_ms); +static ssize_t +requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val < 0) + return -EINVAL; + + hpb->params.requeue_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(requeue_timeout_ms); + +ufshpb_sysfs_param_show_func(activation_thld); +static ssize_t +activation_thld_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.activation_thld = val; + + return count; +} +static DEVICE_ATTR_RW(activation_thld); + +ufshpb_sysfs_param_show_func(normalization_factor); +static ssize_t +normalization_factor_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) + return -EINVAL; + + hpb->params.normalization_factor = val; + + return count; +} +static DEVICE_ATTR_RW(normalization_factor); + +ufshpb_sysfs_param_show_func(eviction_thld_enter); +static ssize_t +eviction_thld_enter_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.eviction_thld_exit) + return -EINVAL; + + hpb->params.eviction_thld_enter = val; + + 
return count; +} +static DEVICE_ATTR_RW(eviction_thld_enter); + +ufshpb_sysfs_param_show_func(eviction_thld_exit); +static ssize_t +eviction_thld_exit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.activation_thld) + return -EINVAL; + + hpb->params.eviction_thld_exit = val; + + return count; +} +static DEVICE_ATTR_RW(eviction_thld_exit); + +ufshpb_sysfs_param_show_func(read_timeout_ms); +static ssize_t +read_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* read_timeout >> timeout_polling_interval */ + if (val < hpb->params.timeout_polling_interval_ms * 2) + return -EINVAL; + + hpb->params.read_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_ms); + +ufshpb_sysfs_param_show_func(read_timeout_expiries); +static ssize_t +read_timeout_expiries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.read_timeout_expiries = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_expiries); + +ufshpb_sysfs_param_show_func(timeout_polling_interval_ms); +static ssize_t +timeout_polling_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* timeout_polling_interval << read_timeout */ + if (val <= 0 || val > hpb->params.read_timeout_ms / 2) + return -EINVAL; + + hpb->params.timeout_polling_interval_ms = val; + + return count; +} +static DEVICE_ATTR_RW(timeout_polling_interval_ms); + +ufshpb_sysfs_param_show_func(inflight_map_req); +static ssize_t inflight_map_req_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) + return -EINVAL; + + hpb->params.inflight_map_req = val; + + return count; +} +static DEVICE_ATTR_RW(inflight_map_req); + +static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.activation_thld = ACTIVATION_THRESHOLD; + hpb->params.normalization_factor = 1; + hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); + hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); + hpb->params.read_timeout_ms = READ_TO_MS; + hpb->params.read_timeout_expiries = 
READ_TO_EXPIRIES;
+	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
+	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
+}
+
+static struct attribute *hpb_dev_param_attrs[] = {
+	&dev_attr_requeue_timeout_ms.attr,
+	&dev_attr_activation_thld.attr,
+	&dev_attr_normalization_factor.attr,
+	&dev_attr_eviction_thld_enter.attr,
+	&dev_attr_eviction_thld_exit.attr,
+	&dev_attr_read_timeout_ms.attr,
+	&dev_attr_read_timeout_expiries.attr,
+	&dev_attr_timeout_polling_interval_ms.attr,
+	&dev_attr_inflight_map_req.attr,
+	NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_param_group = {
+	.name = "hpb_params",
+	.attrs = hpb_dev_param_attrs,
+};
+
+static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_req *pre_req = NULL, *t;
+	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
+	int i;
+
+	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
+
+	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
+	hpb->throttle_pre_req = qd;
+	hpb->num_inflight_pre_req = 0;
+
+	if (!hpb->pre_req)
+		goto release_mem;
+
+	for (i = 0; i < qd; i++) {
+		pre_req = hpb->pre_req + i;
+		INIT_LIST_HEAD(&pre_req->list_req);
+		pre_req->req = NULL;
+
+		pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
+		if (!pre_req->bio)
+			goto release_mem;
+
+		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!pre_req->wb.m_page) {
+			bio_put(pre_req->bio);
+			goto release_mem;
+		}
+
+		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+	}
+
+	return 0;
+release_mem:
+	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
+		list_del_init(&pre_req->list_req);
+		bio_put(pre_req->bio);
+		__free_page(pre_req->wb.m_page);
+	}
+
+	kfree(hpb->pre_req);
+	return -ENOMEM;
+}
+
+static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_req *pre_req = NULL;
+	int i;
+
+	for (i = 0; i < hpb->throttle_pre_req; i++) {
+		pre_req = hpb->pre_req + i;
+		bio_put(hpb->pre_req[i].bio);
+		/* only entries that completed init own a map page */
+		if (pre_req->wb.m_page)
+			__free_page(hpb->pre_req[i].wb.m_page);
+		list_del_init(&pre_req->list_req);
+	}
+
+	kfree(hpb->pre_req);
+}
+
+static void ufshpb_stat_init(struct ufshpb_lu *hpb)
+{
+	hpb->stats.hit_cnt = 0;
+	hpb->stats.miss_cnt = 0;
+	hpb->stats.rcmd_noti_cnt = 0;
+	hpb->stats.rcmd_active_cnt = 0;
+	hpb->stats.rcmd_inactive_cnt = 0;
+	hpb->stats.map_req_cnt = 0;
+	hpb->stats.umap_req_cnt = 0;
+}
+
+static void ufshpb_param_init(struct ufshpb_lu *hpb)
+{
+	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
+	if (hpb->is_hcm)
+		ufshpb_hcm_param_init(hpb);
+}
+
+static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+	int ret;
+
+	spin_lock_init(&hpb->rgn_state_lock);
+	spin_lock_init(&hpb->rsp_list_lock);
+	spin_lock_init(&hpb->param_lock);
+
+	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
+	INIT_LIST_HEAD(&hpb->lh_act_srgn);
+	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
+	INIT_LIST_HEAD(&hpb->list_hpb_lu);
+
+	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+	if (hpb->is_hcm) {
+		INIT_WORK(&hpb->ufshpb_normalization_work,
+			  ufshpb_normalization_work_handler);
+		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+				  ufshpb_read_to_handler);
+	}
+
+	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
+			  sizeof(struct ufshpb_req), 0, 0, NULL);
+	if (!hpb->map_req_cache) {
+		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
+			hpb->lun);
+		return -ENOMEM;
+	}
+
+	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
+			  sizeof(struct page *) * hpb->pages_per_srgn,
+			  0, 0, NULL);
+	if (!hpb->m_page_cache) {
+		dev_err(hba->dev,
"ufshpb(%d) ufshpb_m_page_cache create fail", + hpb->lun); + ret = -ENOMEM; + goto release_req_cache; + } + + ret = ufshpb_pre_req_mempool_init(hpb); + if (ret) { + dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail", + hpb->lun); + goto release_m_page_cache; + } + + ret = ufshpb_alloc_region_tbl(hba, hpb); + if (ret) + goto release_pre_req_mempool; + + ufshpb_stat_init(hpb); + ufshpb_param_init(hpb); + + if (hpb->is_hcm) { + unsigned int poll; + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); + } + + return 0; + +release_pre_req_mempool: + ufshpb_pre_req_mempool_destroy(hpb); +release_m_page_cache: + kmem_cache_destroy(hpb->m_page_cache); +release_req_cache: + kmem_cache_destroy(hpb->map_req_cache); + return ret; +} + +static struct ufshpb_lu * +ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + struct ufshpb_lu *hpb; + int ret; + + hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); + if (!hpb) + return NULL; + + hpb->lun = sdev->lun; + hpb->sdev_ufs_lu = sdev; + + ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); + + ret = ufshpb_lu_hpb_init(hba, hpb); + if (ret) { + dev_err(hba->dev, "hpb lu init failed. ret %d", ret); + goto release_hpb; + } + + sdev->hostdata = hpb; + return hpb; + +release_hpb: + kfree(hpb); + return NULL; +} + +static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn, *next_rgn; + struct ufshpb_subregion *srgn, *next_srgn; + unsigned long flags; + + /* + * If the device reset occurred, the remaining HPB region information + * may be stale. Therefore, by discarding the lists of HPB response + * that remained after reset, we prevent unnecessary work. 
+ */ + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, + list_inact_rgn) + list_del_init(&rgn->list_inact_rgn); + + list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, + list_act_srgn) + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) +{ + if (hpb->is_hcm) { + cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); + cancel_work_sync(&hpb->ufshpb_normalization_work); + } + cancel_work_sync(&hpb->map_work); +} + +static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) +{ + int err = 0; + bool flag_res = true; + int try; + + /* wait for the device to complete HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + dev_dbg(hba->dev, + "%s start flag reset polling %d times\n", + __func__, try); + + /* Poll fHpbReset flag to be cleared */ + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res); + + if (err) { + dev_err(hba->dev, + "%s reading fHpbReset flag failed with error %d\n", + __func__, err); + return flag_res; + } + + if (!flag_res) + goto out; + + usleep_range(1000, 1100); + } + if (flag_res) { + dev_err(hba->dev, + "%s fHpbReset was not cleared by the device\n", + __func__); + } +out: + return flag_res; +} + +/** + * ufshpb_toggle_state - switch HPB state of all LUs + * @hba: per-adapter instance + * @src: expected current HPB state + * @dest: target HPB state to switch to + */ +void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb || ufshpb_get_state(hpb) != src) + continue; + ufshpb_set_state(hpb, dest); + + if (dest == HPB_RESET) { + ufshpb_cancel_jobs(hpb); + ufshpb_discard_rsp_lists(hpb); + } + } +} + +void ufshpb_suspend(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + + ufshpb_set_state(hpb, HPB_SUSPEND); + ufshpb_cancel_jobs(hpb); + } +} + +void ufshpb_resume(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND) + continue; + + ufshpb_set_state(hpb, HPB_PRESENT); + ufshpb_kick_map_work(hpb); + if (hpb->is_hcm) { + unsigned int poll = hpb->params.timeout_polling_interval_ms; + + schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll)); + } + } +} + +static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, + struct ufshpb_lu_info *hpb_lu_info) +{ + u16 max_active_rgns; + u8 lu_enable; + int size; + int ret; + char desc_buf[QUERY_DESC_MAX_SIZE]; + + ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + QUERY_DESC_IDN_UNIT, lun, 0, + desc_buf, &size); + ufshcd_rpm_put_sync(hba); + + if (ret) { + dev_err(hba->dev, + "%s: idn: %d lun: %d query request failed", + __func__, QUERY_DESC_IDN_UNIT, lun); + return ret; + } + + lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE]; + if (lu_enable != LU_ENABLED_HPB_FUNC) + return -ENODEV; + + max_active_rgns = get_unaligned_be16( + desc_buf + 
UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS); + if (!max_active_rgns) { + dev_err(hba->dev, + "lun %d wrong number of max active regions\n", lun); + return -ENODEV; + } + + hpb_lu_info->num_blocks = get_unaligned_be64( + desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT); + hpb_lu_info->pinned_start = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF); + hpb_lu_info->num_pinned = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS); + hpb_lu_info->max_active_rgns = max_active_rgns; + + return 0; +} + +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + return; + + ufshpb_set_state(hpb, HPB_FAILED); + + sdev = hpb->sdev_ufs_lu; + sdev->hostdata = NULL; + + ufshpb_cancel_jobs(hpb); + + ufshpb_pre_req_mempool_destroy(hpb); + ufshpb_destroy_region_tbl(hpb); + + kmem_cache_destroy(hpb->map_req_cache); + kmem_cache_destroy(hpb->m_page_cache); + + list_del_init(&hpb->list_hpb_lu); + + kfree(hpb); +} + +static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) +{ + int pool_size; + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + bool init_success; + + if (tot_active_srgn_pages == 0) { + ufshpb_remove(hba); + return; + } + + init_success = !ufshpb_check_hpb_reset_query(hba); + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + if (pool_size > tot_active_srgn_pages) { + mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages); + mempool_resize(ufshpb_page_pool, tot_active_srgn_pages); + } + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (init_success) { + ufshpb_set_state(hpb, HPB_PRESENT); + if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) + queue_work(ufshpb_wq, &hpb->map_work); + } else { + dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); + ufshpb_destroy_lu(hba, sdev); + } + } + + if (!init_success) + ufshpb_remove(hba); +} + +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb; + int ret; + struct ufshpb_lu_info hpb_lu_info = { 0 }; + int lun = sdev->lun; + + if (lun >= hba->dev_info.max_lu_supported) + goto out; + + ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info); + if (ret) + goto out; + + hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev, + &hpb_lu_info); + if (!hpb) + goto out; + + tot_active_srgn_pages += hpb_lu_info.max_active_rgns * + hpb->srgns_per_rgn * hpb->pages_per_srgn; + +out: + /* All LUs are initialized */ + if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt)) + ufshpb_hpb_lu_prepared(hba); +} + +static int ufshpb_init_mem_wq(struct ufs_hba *hba) +{ + int ret; + unsigned int pool_size; + + ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache", + sizeof(struct ufshpb_map_ctx), + 0, 0, NULL); + if (!ufshpb_mctx_cache) { + dev_err(hba->dev, "ufshpb: cannot init mctx cache\n"); + return -ENOMEM; + } + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n", + __func__, __LINE__, ufshpb_host_map_kbytes, pool_size); + + ufshpb_mctx_pool = mempool_create_slab_pool(pool_size, + ufshpb_mctx_cache); + if (!ufshpb_mctx_pool) { + dev_err(hba->dev, "ufshpb: cannot init mctx pool\n"); + ret = -ENOMEM; + goto release_mctx_cache; + } + + ufshpb_page_pool = mempool_create_page_pool(pool_size, 0); + if (!ufshpb_page_pool) { + dev_err(hba->dev, "ufshpb: cannot init page pool\n"); + ret = -ENOMEM; + goto release_mctx_pool; + } + + ufshpb_wq = 
alloc_workqueue("ufshpb-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + if (!ufshpb_wq) { + dev_err(hba->dev, "ufshpb: alloc workqueue failed\n"); + ret = -ENOMEM; + goto release_page_pool; + } + + return 0; + +release_page_pool: + mempool_destroy(ufshpb_page_pool); +release_mctx_pool: + mempool_destroy(ufshpb_mctx_pool); +release_mctx_cache: + kmem_cache_destroy(ufshpb_mctx_cache); + return ret; +} + +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) +{ + struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev; + int max_active_rgns = 0; + int hpb_num_lu; + + hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU]; + if (hpb_num_lu == 0) { + dev_err(hba->dev, "No HPB LU supported\n"); + hpb_info->hpb_disabled = true; + return; + } + + hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE]; + hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE]; + max_active_rgns = get_unaligned_be16(geo_buf + + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS); + + if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 || + max_active_rgns == 0) { + dev_err(hba->dev, "No HPB supported device\n"); + hpb_info->hpb_disabled = true; + return; + } +} + +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int version, ret; + int max_single_cmd; + + hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + + version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); + if ((version != HPB_SUPPORT_VERSION) && + (version != HPB_SUPPORT_LEGACY_VERSION)) { + dev_err(hba->dev, "%s: HPB %x version is not supported.\n", + __func__, version); + hpb_dev_info->hpb_disabled = true; + return; + } + + if (version == HPB_SUPPORT_LEGACY_VERSION) + hpb_dev_info->is_legacy = true; + + /* + * Get the number of user logical unit to check whether all + * scsi_device finish initialization + */ + hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; + + if (hpb_dev_info->is_legacy) + return; + + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd); + + if (ret) + hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH; + else + hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH); +} + +void ufshpb_init(struct ufs_hba *hba) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int try; + int ret; + + if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled) + return; + + if (ufshpb_init_mem_wq(hba)) { + hpb_dev_info->hpb_disabled = true; + return; + } + + atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu); + tot_active_srgn_pages = 0; + /* issue HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, NULL); + if (!ret) + break; + } +} + +void ufshpb_remove(struct ufs_hba *hba) +{ + mempool_destroy(ufshpb_page_pool); + mempool_destroy(ufshpb_mctx_pool); + kmem_cache_destroy(ufshpb_mctx_cache); + + destroy_workqueue(ufshpb_wq); +} + +module_param(ufshpb_host_map_kbytes, uint, 0644); +MODULE_PARM_DESC(ufshpb_host_map_kbytes, + "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool"); diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h new file mode 100644 index 000000000000..0d6e6004d783 --- /dev/null +++ b/drivers/ufs/core/ufshpb.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 
2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Yongmyung Lee <ymhungry.lee@samsung.com>
+ *	Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+/* hpb response UPIU macro */
+#define HPB_RSP_NONE			0x0
+#define HPB_RSP_REQ_REGION_UPDATE	0x1
+#define HPB_RSP_DEV_RESET		0x2
+#define MAX_ACTIVE_NUM			2
+#define MAX_INACTIVE_NUM		2
+#define DEV_DATA_SEG_LEN		0x14
+#define DEV_SENSE_SEG_LEN		0x12
+#define DEV_DES_TYPE			0x80
+#define DEV_ADDITIONAL_LEN		0x10
+
+/* hpb map & entries macro */
+#define HPB_RGN_SIZE_UNIT		512
+#define HPB_ENTRY_BLOCK_SIZE		4096
+#define HPB_ENTRY_SIZE			0x8
+#define PINNED_NOT_SET			U32_MAX
+
+/* hpb support chunk size */
+#define HPB_LEGACY_CHUNK_HIGH		1
+#define HPB_MULTI_CHUNK_HIGH		255
+
+/* hpb vendor defined opcode */
+#define UFSHPB_READ			0xF8
+#define UFSHPB_READ_BUFFER		0xF9
+#define UFSHPB_READ_BUFFER_ID		0x01
+#define UFSHPB_WRITE_BUFFER		0xFA
+#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID	0x01
+#define UFSHPB_WRITE_BUFFER_PREFETCH_ID		0x02
+#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID	0x03
+#define HPB_WRITE_BUFFER_CMD_LENGTH	10
+#define MAX_HPB_READ_ID			0x7F
+#define HPB_READ_BUFFER_CMD_LENGTH	10
+#define LU_ENABLED_HPB_FUNC		0x02
+
+#define HPB_RESET_REQ_RETRIES		10
+#define HPB_MAP_REQ_RETRIES		5
+#define HPB_REQUEUE_TIME_MS		0
+
+#define HPB_SUPPORT_VERSION		0x200
+#define HPB_SUPPORT_LEGACY_VERSION	0x100
+
+enum UFSHPB_MODE {
+	HPB_HOST_CONTROL,
+	HPB_DEVICE_CONTROL,
+};
+
+enum UFSHPB_STATE {
+	HPB_INIT,
+	HPB_PRESENT,
+	HPB_SUSPEND,
+	HPB_FAILED,
+	HPB_RESET,
+};
+
+enum HPB_RGN_STATE {
+	HPB_RGN_INACTIVE,
+	HPB_RGN_ACTIVE,
+	/* pinned regions are always active */
+	HPB_RGN_PINNED,
+};
+
+enum HPB_SRGN_STATE {
+	HPB_SRGN_UNUSED,
+	HPB_SRGN_INVALID,
+	HPB_SRGN_VALID,
+	HPB_SRGN_ISSUED,
+};
+
+/**
+ * struct ufshpb_lu_info - UFSHPB logical unit related info
+ * @num_blocks: the number of logical blocks
+ * @pinned_start: the start region number of the pinned region
+ * @num_pinned: the number of pinned regions
+ * @max_active_rgns: maximum number of active regions
+ */
+struct ufshpb_lu_info {
+	int num_blocks;
+	int pinned_start;
+	int num_pinned;
+	int max_active_rgns;
+};
+
+struct ufshpb_map_ctx {
+	struct page **m_page;
+	unsigned long *ppn_dirty;
+};
+
+struct ufshpb_subregion {
+	struct ufshpb_map_ctx *mctx;
+	enum HPB_SRGN_STATE srgn_state;
+	int rgn_idx;
+	int srgn_idx;
+	bool is_last;
+
+	/* subregion reads - for host mode */
+	unsigned int reads;
+
+	/* below information is used by rsp_list */
+	struct list_head list_act_srgn;
+};
+
+struct ufshpb_region {
+	struct ufshpb_lu *hpb;
+	struct ufshpb_subregion *srgn_tbl;
+	enum HPB_RGN_STATE rgn_state;
+	int rgn_idx;
+	int srgn_cnt;
+
+	/* below information is used by rsp_list */
+	struct list_head list_inact_rgn;
+
+	/* below information is used by lru */
+	struct list_head list_lru_rgn;
+	unsigned long rgn_flags;
+#define RGN_FLAG_DIRTY 0
+#define RGN_FLAG_UPDATE 1
+
+	/* region reads - for host mode */
+	spinlock_t rgn_lock;
+	unsigned int reads;
+	/* region "cold" timer - for host mode */
+	ktime_t read_timeout;
+	unsigned int read_timeout_expiries;
+	struct list_head list_expired_rgn;
+};
+
+#define for_each_sub_region(rgn, i, srgn)				\
+	for ((i) = 0;							\
+	     ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]);	\
+	     (i)++)
+
+/**
+ * struct ufshpb_req - HPB related request structure (write/read buffer)
+ * @req: block layer request structure
+ * @bio: bio for this request
+ * @hpb: the ufshpb_lu structure this request relates to
+ * @list_req: ufshpb_req mempool list
+ * @sense: store
its sense data
+ * @mctx: L2P map information
+ * @rgn_idx: target region index
+ * @srgn_idx: target sub-region index
+ * @lun: target logical unit number
+ * @m_page: L2P map information data for pre-request
+ * @len: length of host-side cached L2P map in m_page
+ * @lpn: start LPN of L2P map in m_page
+ */
+struct ufshpb_req {
+	struct request *req;
+	struct bio *bio;
+	struct ufshpb_lu *hpb;
+	struct list_head list_req;
+	union {
+		struct {
+			struct ufshpb_map_ctx *mctx;
+			unsigned int rgn_idx;
+			unsigned int srgn_idx;
+			unsigned int lun;
+		} rb;
+		struct {
+			struct page *m_page;
+			unsigned int len;
+			unsigned long lpn;
+		} wb;
+	};
+};
+
+struct victim_select_info {
+	struct list_head lh_lru_rgn; /* LRU list of regions */
+	int max_lru_active_cnt; /* supported hpb #region - pinned #region */
+	atomic_t active_cnt;
+};
+
+/**
+ * struct ufshpb_params - UFS HPB parameters
+ * @requeue_timeout_ms - requeue threshold of wb command (0x2)
+ * @activation_thld - min reads [IOs] to activate/update a region
+ * @normalization_factor - shift right the region's reads
+ * @eviction_thld_enter - min reads [IOs] for the entering region in eviction
+ * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction
+ * @read_timeout_ms - timeout [ms] from the last read IO to the region
+ * @read_timeout_expiries - number of allowable timeout expiries
+ * @timeout_polling_interval_ms - interval at which timeouts are checked
+ * @inflight_map_req - number of inflight map requests
+ */
+struct ufshpb_params {
+	unsigned int requeue_timeout_ms;
+	unsigned int activation_thld;
+	unsigned int normalization_factor;
+	unsigned int eviction_thld_enter;
+	unsigned int eviction_thld_exit;
+	unsigned int read_timeout_ms;
+	unsigned int read_timeout_expiries;
+	unsigned int timeout_polling_interval_ms;
+	unsigned int inflight_map_req;
+};
+
+struct ufshpb_stats {
+	u64 hit_cnt;
+	u64 miss_cnt;
+	u64 rcmd_noti_cnt;
+	u64 rcmd_active_cnt;
+	u64 rcmd_inactive_cnt;
+	u64 map_req_cnt;
+	u64 pre_req_cnt;
+	u64 umap_req_cnt;
+};
+
+struct ufshpb_lu {
+	int lun;
+	struct scsi_device *sdev_ufs_lu;
+
+	spinlock_t rgn_state_lock; /* protects rgn/srgn state */
+	struct ufshpb_region *rgn_tbl;
+
+	atomic_t hpb_state;
+
+	spinlock_t rsp_list_lock;
+	struct list_head lh_act_srgn; /* hold rsp_list_lock */
+	struct list_head lh_inact_rgn; /* hold rsp_list_lock */
+
+	/* pre request information */
+	struct ufshpb_req *pre_req;
+	int num_inflight_pre_req;
+	int throttle_pre_req;
+	int num_inflight_map_req; /* hold param_lock */
+	spinlock_t param_lock;
+
+	struct list_head lh_pre_req_free;
+	int pre_req_max_tr_len;
+
+	/* cached L2P map management worker */
+	struct work_struct map_work;
+
+	/* for selecting victim */
+	struct victim_select_info lru_info;
+	struct work_struct ufshpb_normalization_work;
+	struct delayed_work ufshpb_read_to_work;
+	unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0
+
+	/* pinned region information */
+	u32 lu_pinned_start;
+	u32 lu_pinned_end;
+
+	/* HPB related configuration */
+	u32 rgns_per_lu;
+	u32 srgns_per_lu;
+	u32 last_srgn_entries;
+	int srgns_per_rgn;
+	u32 srgn_mem_size;
+	u32 entries_per_rgn_mask;
+	u32 entries_per_rgn_shift;
+	u32 entries_per_srgn;
+	u32 entries_per_srgn_mask;
+	u32 entries_per_srgn_shift;
+	u32 pages_per_srgn;
+
+	bool is_hcm;
+
+	struct ufshpb_stats stats;
+	struct ufshpb_params params;
+
+	struct kmem_cache *map_req_cache;
+	struct kmem_cache *m_page_cache;
+
+	struct list_head list_hpb_lu;
+};
+
+struct ufs_hba;
+struct ufshcd_lrb;
+
+#ifndef CONFIG_SCSI_UFS_HPB
+static inline int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
+static inline void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static inline void ufshpb_resume(struct ufs_hba *hba) {}
+static inline void ufshpb_suspend(struct ufs_hba *hba) {}
+static inline void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) {}
+static inline void ufshpb_init(struct ufs_hba *hba) {}
+static inline void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_remove(struct ufs_hba *hba) {}
+static inline bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
+static inline void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
+static inline void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
+static inline bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
+#else
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_resume(struct ufs_hba *hba);
+void ufshpb_suspend(struct ufs_hba *hba);
+void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_remove(struct ufs_hba *hba);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+bool ufshpb_is_legacy(struct ufs_hba *hba);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+extern struct attribute_group ufs_sysfs_hpb_param_group;
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
new file mode 100644
index 000000000000..82590224da13
--- /dev/null
+++ b/drivers/ufs/host/Kconfig
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Kernel configuration file for the UFS host controller drivers.
+#
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+#	Santosh Yaraganavi <santosh.sy@samsung.com>
+#	Vinayak Holikatti <h.vinayak@samsung.com>
+
+config SCSI_UFSHCD_PCI
+	tristate "PCI bus based UFS Controller support"
+	depends on PCI
+	help
+	  This selects the PCI UFS Host Controller Interface. Select this if
+	  you have a UFS Host Controller with a PCI Interface.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
+config SCSI_UFS_DWC_TC_PCI
+	tristate "DesignWare PCI support using a G210 Test Chip"
+	depends on SCSI_UFSHCD_PCI
+	help
+	  Synopsys Test Chip is a PHY for prototyping purposes.
+
+	  If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+	tristate "Platform bus based UFS Controller support"
+	depends on HAS_IOMEM
+	help
+	  This selects the UFS host controller support. Select this if
+	  you have a UFS controller on the platform bus.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
+config SCSI_UFS_CDNS_PLATFORM
+	tristate "Cadence UFS Controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM
+	help
+	  This selects the Cadence-specific additions to the UFSHCD
+	  platform driver.
+
+	  If unsure, say N.
+
+config SCSI_UFS_DWC_TC_PLATFORM
+	tristate "DesignWare platform support using a G210 Test Chip"
+	depends on SCSI_UFSHCD_PLATFORM
+	help
+	  Synopsys Test Chip is a PHY for prototyping purposes.
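+	  The platform glue module is called tc-dwc-g210-pltfrm; it is
+	  built together with the shared ufshcd-dwc and tc-dwc-g210
+	  helpers.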
+
+	  If unsure, say N.
+
+config SCSI_UFS_QCOM
+	tristate "QCOM specific hooks to UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+	select QCOM_SCM if SCSI_UFS_CRYPTO
+	select RESET_CONTROLLER
+	help
+	  This selects the QCOM specific additions to the UFSHCD platform
+	  driver. The UFS host on QCOM chipsets needs some vendor specific
+	  configuration before accessing the hardware, which includes PHY
+	  configuration and vendor specific registers.
+
+	  Select this if you have a UFS controller on a QCOM chipset.
+	  If unsure, say N.
+
+config SCSI_UFS_MEDIATEK
+	tristate "Mediatek specific hooks to UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+	select PHY_MTK_UFS
+	select RESET_TI_SYSCON
+	help
+	  This selects the Mediatek specific additions to the UFSHCD platform
+	  driver. The UFS host on Mediatek SoCs needs some vendor specific
+	  configuration before accessing the hardware, which includes PHY
+	  configuration and vendor specific registers.
+
+	  Select this if you have a UFS controller on a Mediatek chipset.
+
+	  If unsure, say N.
+
+config SCSI_UFS_HISI
+	tristate "Hisilicon specific hooks to UFS controller platform driver"
+	depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+	help
+	  This selects the Hisilicon specific additions to the UFSHCD
+	  platform driver.
+
+	  Select this if you have a UFS controller on a Hisilicon chipset.
+	  If unsure, say N.
+
+config SCSI_UFS_TI_J721E
+	tristate "TI glue layer for Cadence UFS Controller"
+	depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+	help
+	  This selects the driver for the TI glue layer for the Cadence UFS
+	  Host Controller IP.
+
+	  Select this if you have a TI platform with a UFS controller.
+	  If unsure, say N.
+
+config SCSI_UFS_EXYNOS
+	tristate "Exynos specific hooks to UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
+	help
+	  This selects the Samsung Exynos SoC specific additions to the
+	  UFSHCD platform driver. The UFS host on Samsung Exynos SoCs
+	  includes the HCI and UNIPRO layers, and works together with the
+	  UFS-PHY driver.
+
+	  Select this if you have a UFS host controller on a Samsung
+	  Exynos SoC.
+	  If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
new file mode 100644
index 000000000000..e4be54273c98
--- /dev/null
+++ b/drivers/ufs/host/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
+ufs_qcom-y += ufs-qcom.o
+ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o
+obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
+obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
new file mode 100644
index 000000000000..e05c0ae64eea
--- /dev/null
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform UFS Host driver for Cadence controller
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ * + * Authors: + * Jan Kotas <jank@cadence.com> + * + */ + +#include <linux/clk.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/time.h> + +#include "ufshcd-pltfrm.h" + +#define CDNS_UFS_REG_HCLKDIV 0xFC +#define CDNS_UFS_REG_PHY_XCFGD1 0x113C +#define CDNS_UFS_MAX_L4_ATTRS 12 + +struct cdns_ufs_host { + /** + * cdns_ufs_dme_attr_val - for storing L4 attributes + */ + u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS]; +}; + +/** + * cdns_ufs_get_l4_attr - get L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_get_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID), + &host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID), + &host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + &host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID), + &host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS), + &host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + &host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE), + &host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + &host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_l4_attr - set L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_set_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), + host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), + host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID), + host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), + host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE), + host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_hclkdiv() + * Sets HCLKDIV register value based on the core_clk + * @hba: host controller instance + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + unsigned long core_clk_rate = 0; + u32 core_clk_div = 0; + + if (list_empty(head)) + return 0; + + list_for_each_entry(clki, 
head, list) { + if (IS_ERR_OR_NULL(clki->clk)) + continue; + if (!strcmp(clki->name, "core_clk")) + core_clk_rate = clk_get_rate(clki->clk); + } + + if (!core_clk_rate) { + dev_err(hba->dev, "%s: unable to find core_clk rate\n", + __func__); + return -EINVAL; + } + + core_clk_div = core_clk_rate / USEC_PER_SEC; + + ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV); + /** + * Make sure the register was updated, + * UniPro layer will not work with an incorrect value. + */ + mb(); + + return 0; +} + +/** + * cdns_ufs_hce_enable_notify() + * Called before and after HCE enable bit is set. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + return cdns_ufs_set_hclkdiv(hba); +} + +/** + * cdns_ufs_hibern8_notify() + * Called around hibern8 enter/exit. + * @hba: host controller instance + * @cmd: UIC Command + * @status: notify stage (pre, post change) + * + */ +static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + cdns_ufs_get_l4_attr(hba); + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + cdns_ufs_set_l4_attr(hba); +} + +/** + * cdns_ufs_link_startup_notify() + * Called before and after Link startup is carried out. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + /* + * Some UFS devices have issues if LCC is enabled. + * So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. + */ + ufshcd_disable_host_tx_lcc(hba); + + /* + * Disabling Autohibern8 feature in cadence UFS + * to mask unexpected interrupt trigger. 
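+	 * Writing 0 to hba->ahit below keeps the Auto-Hibernate Idle
+	 * Timer disabled, so the controller never enters hibern8 on
+	 * its own; hibern8 entry/exit stays under explicit driver
+	 * control (see cdns_ufs_hibern8_notify() above).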
+ */ + hba->ahit = 0; + + return 0; +} + +/** + * cdns_ufs_init - performs additional ufs initialization + * @hba: host controller instance + * + * Returns status of initialization + */ +static int cdns_ufs_init(struct ufs_hba *hba) +{ + int status = 0; + struct cdns_ufs_host *host; + struct device *dev = hba->dev; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); + + status = ufshcd_vops_phy_initialization(hba); + + return status; +} + +/** + * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization + * @hba: host controller instance + * + * Always returns 0 + */ +static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) +{ + u32 data; + + /* Increase RX_Advanced_Min_ActivateTime_Capability */ + data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1); + data |= BIT(24); + ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1); + + return 0; +} + +static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { + .name = "cdns-ufs-pltfm", + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .hibern8_notify = cdns_ufs_hibern8_notify, +}; + +static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { + .name = "cdns-ufs-pltfm", + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, + .hibern8_notify = cdns_ufs_hibern8_notify, +}; + +static const struct of_device_id cdns_ufs_of_match[] = { + { + .compatible = "cdns,ufshc", + .data = &cdns_ufs_pltfm_hba_vops, + }, + { + .compatible = "cdns,ufshc-m31-16nm", + .data = &cdns_ufs_m31_16nm_pltfm_hba_vops, + }, + { }, +}; + +MODULE_DEVICE_TABLE(of, cdns_ufs_of_match); + +/** + * cdns_ufs_pltfrm_probe - probe routine of the driver + * @pdev: pointer to platform device handle + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_pltfrm_probe(struct platform_device *pdev) +{ + int err; + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(cdns_ufs_of_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * cdns_ufs_pltfrm_remove - removes the ufs driver + * @pdev: pointer to platform device handle + * + * Always returns 0 + */ +static int cdns_ufs_pltfrm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + ufshcd_remove(hba); + return 0; +} + +static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver cdns_ufs_pltfrm_driver = { + .probe = cdns_ufs_pltfrm_probe, + .remove = cdns_ufs_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "cdns-ufshcd", + .pm = &cdns_ufs_dev_pm_ops, + .of_match_table = cdns_ufs_of_match, + }, +}; + +module_platform_driver(cdns_ufs_pltfrm_driver); + +MODULE_AUTHOR("Jan Kotas <jank@cadence.com>"); +MODULE_DESCRIPTION("Cadence UFS host controller platform driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c new file mode 100644 index 000000000000..92b8ad4b58fe --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#include <ufs/ufshcd.h> +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> + +/* Test Chip type expected values */ +#define TC_G210_20BIT 20 +#define TC_G210_40BIT 40 +#define TC_G210_INV 0 + +static int tc_type = TC_G210_INV; +module_param(tc_type, int, 0); +MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)"); + +/* + * struct ufs_hba_dwc_vops - UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = { + .name = "tc-dwc-g210-pci", + .link_startup_notify = ufshcd_dwc_link_startup_notify, +}; + +/** + * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state + * @pdev: pointer to PCI device handle + */ +static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); +} + +/** + * tc_dwc_g210_pci_remove - de-allocate PCI/SCSI host and host memory space + * data structure memory + * @pdev: pointer to PCI handle + */ +static void tc_dwc_g210_pci_remove(struct pci_dev *pdev) +{ + struct ufs_hba *hba = pci_get_drvdata(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + ufshcd_remove(hba); +} + +/** + * tc_dwc_g210_pci_probe - probe routine of the driver + * @pdev: pointer to PCI device handle + * @id: PCI device id + * + * Returns 0 on success, non-zero value on failure + */ +static int +tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + int err; + + /* Check Test Chip type and set the specific setup routine */ + if (tc_type == TC_G210_20BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_20_bit; + } else if (tc_type == TC_G210_40BIT) { + tc_dwc_g210_pci_hba_vops.phy_initialization = + tc_dwc_g210_config_40_bit; + } else { + dev_err(&pdev->dev, "test chip version not specified\n"); + return -EPERM; + } + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; + } + + pci_set_master(pdev); + + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); + if (err < 0) { + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; + } + + mmio_base = pcim_iomap_table(pdev)[0]; + + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + hba->vops = &tc_dwc_g210_pci_hba_vops; + + err = ufshcd_init(hba, mmio_base, pdev->irq); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + return err; + } + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static const struct pci_device_id tc_dwc_g210_pci_tbl[] = { + { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 
PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl); + +static struct pci_driver tc_dwc_g210_pci_driver = { + .name = "tc-dwc-g210-pci", + .id_table = tc_dwc_g210_pci_tbl, + .probe = tc_dwc_g210_pci_probe, + .remove = tc_dwc_g210_pci_remove, + .shutdown = tc_dwc_g210_pci_shutdown, + .driver = { + .pm = &tc_dwc_g210_pci_pm_ops + }, +}; + +module_pci_driver(tc_dwc_g210_pci_driver); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c new file mode 100644 index 000000000000..f15a84d0c176 --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> + +#include "ufshcd-pltfrm.h" +#include "ufshcd-dwc.h" +#include "tc-dwc-g210.h" + +/* + * UFS DWC specific variant operations + */ +static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_20_bit, +}; + +static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = { + .name = "tc-dwc-g210-pltfm", + .link_startup_notify = ufshcd_dwc_link_startup_notify, + .phy_initialization = tc_dwc_g210_config_40_bit, +}; + +static const struct of_device_id tc_dwc_g210_pltfm_match[] = { + { + .compatible = "snps,g210-tc-6.00-20bit", + .data = &tc_dwc_g210_20bit_pltfm_hba_vops, + }, + { + .compatible = "snps,g210-tc-6.00-40bit", + .data = &tc_dwc_g210_40bit_pltfm_hba_vops, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match); + +/** + * tc_dwc_g210_pltfm_probe() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev) +{ + int err; + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * tc_dwc_g210_pltfm_remove() + * @pdev: pointer to platform device structure + * + */ +static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + + return 0; +} + +static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) +}; + +static struct platform_driver tc_dwc_g210_pltfm_driver = { + .probe = tc_dwc_g210_pltfm_probe, + .remove = tc_dwc_g210_pltfm_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "tc-dwc-g210-pltfm", + .pm = &tc_dwc_g210_pltfm_pm_ops, + .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match), + }, +}; + +module_platform_driver(tc_dwc_g210_pltfm_driver); 
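+
+/*
+ * The driver binds on the DT compatible string, which also selects the
+ * RMMI width of the test chip. A 40-bit G210 node could look like:
+ *
+ *	ufs@d0000000 {
+ *		compatible = "snps,g210-tc-6.00-40bit";
+ *	};
+ *
+ * (The unit address above is only illustrative.)
+ */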
+ +MODULE_ALIAS("platform:tc-dwc-g210-pltfm"); +MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver"); +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c new file mode 100644 index 000000000000..deb93dbd83a4 --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#include <linux/module.h> + +#include <ufs/ufshcd.h> +#include <ufs/unipro.h> + +#include "ufshcd-dwc.h" +#include "ufshci-dwc.h" +#include "tc-dwc-g210.h" + +/** + * tc_dwc_g210_setup_40bit_rmmi() + * This function configures Synopsys TC specific atributes (40-bit RMMI) + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba) +{ + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, + { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, + { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL }, + { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL }, + { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL }, + { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL }, + { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL }, + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + ARRAY_SIZE(setup_attrs)); +} + +/** + * tc_dwc_g210_setup_20bit_rmmi_lane0() + * This function configures Synopsys TC 20-bit RMMI Lane 0 + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba) +{ + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19, + DME_LOCAL }, + { 
UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL }, + { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f, + DME_LOCAL }, + { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL }, + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + ARRAY_SIZE(setup_attrs)); +} + +/** + * tc_dwc_g210_setup_20bit_rmmi_lane1() + * This function configures Synopsys TC 20-bit RMMI Lane 1 + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba) +{ + int connected_rx_lanes = 0; + int connected_tx_lanes = 0; + int ret = 0; + + static const struct ufshcd_dme_attr_val setup_tx_attrs[] = { + { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6, + DME_LOCAL }, + }; + + static const struct ufshcd_dme_attr_val setup_rx_attrs[] = { + { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E, + DME_LOCAL }, + { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f, + DME_LOCAL }, + }; + + /* Get the available lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES), + &connected_rx_lanes); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), + &connected_tx_lanes); + + if (connected_tx_lanes == 2) { + + ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs, + ARRAY_SIZE(setup_tx_attrs)); + + if (ret) + goto out; + } + + if (connected_rx_lanes == 2) { + ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs, + ARRAY_SIZE(setup_rx_attrs)); + } + +out: + return ret; +} + +/** + * tc_dwc_g210_setup_20bit_rmmi() + * This function 
configures Synopsys TC specific attributes (20-bit RMMI) + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba) +{ + int ret = 0; + + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, + { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, + { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL }, + { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL }, + { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL }, + { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL }, + { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL }, + }; + + ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs, + ARRAY_SIZE(setup_attrs)); + if (ret) + goto out; + + /* Lane 0 configuration */ + ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba); + if (ret) + goto out; + + /* Lane 1 configuration */ + ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba); + if (ret) + goto out; + +out: + return ret; +} + +/** + * tc_dwc_g210_config_40_bit() + * This function configures Local (host) Synopsys 40-bit TC specific attributes + * + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +int tc_dwc_g210_config_40_bit(struct ufs_hba *hba) +{ + int ret = 0; + + dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n"); + ret = tc_dwc_g210_setup_40bit_rmmi(hba); + if (ret) { + dev_err(hba->dev, "Configuration failed\n"); + goto out; + } + + /* To write Shadow register bank to effective configuration block */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); + if (ret) + goto out; + + /* To configure Debug OMC */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); + +out: + return ret; +} +EXPORT_SYMBOL(tc_dwc_g210_config_40_bit); + +/** + * tc_dwc_g210_config_20_bit() + * This function configures Local (host) Synopsys 20-bit TC specific attributes + * + * @hba: Pointer to drivers structure + * + * Returns 0 on success or non-zero value on failure + */ +int tc_dwc_g210_config_20_bit(struct ufs_hba *hba) +{ + int ret = 0; + + dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n"); + ret = tc_dwc_g210_setup_20bit_rmmi(hba); + if (ret) { + dev_err(hba->dev, "Configuration failed\n"); + goto out; + } + + /* To write Shadow register bank to effective configuration block */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01); + if (ret) + goto out; + + /* To configure Debug OMC */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01); + +out: + return ret; +} +EXPORT_SYMBOL(tc_dwc_g210_config_20_bit); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys G210 Test Chip driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/tc-dwc-g210.h b/drivers/ufs/host/tc-dwc-g210.h new file mode 100644 index 000000000000..f7154012f5c7 --- /dev/null +++ b/drivers/ufs/host/tc-dwc-g210.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#ifndef _TC_DWC_G210_H +#define _TC_DWC_G210_H + +struct ufs_hba; + +int tc_dwc_g210_config_40_bit(struct ufs_hba *hba); +int tc_dwc_g210_config_20_bit(struct ufs_hba *hba); + +#endif /* End of Header */ diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c new file mode 100644 index 000000000000..122d650d0810 --- /dev/null +++ b/drivers/ufs/host/ti-j721e-ufs.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#define TI_UFS_SS_CTRL 0x4 +#define TI_UFS_SS_RST_N_PCS BIT(0) +#define TI_UFS_SS_CLK_26MHZ BIT(4) + +static int ti_j721e_ufs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long clk_rate; + void __iomem *regbase; + struct clk *clk; + u32 reg = 0; + int ret; + + regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regbase)) + return PTR_ERR(regbase); + + pm_runtime_enable(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto disable_pm; + + /* Select MPHY refclk frequency */ + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "Cannot claim MPHY clock.\n"); + goto clk_err; + } + clk_rate = clk_get_rate(clk); + if (clk_rate == 26000000) + reg |= TI_UFS_SS_CLK_26MHZ; + devm_clk_put(dev, clk); + + /* Take UFS slave device out of reset */ + reg |= TI_UFS_SS_RST_N_PCS; + writel(reg, regbase + TI_UFS_SS_CTRL); + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, + dev); + if (ret) { + dev_err(dev, "failed to populate child nodes %d\n", ret); + goto clk_err; + } + + return ret; + +clk_err: + pm_runtime_put_sync(dev); +disable_pm: + pm_runtime_disable(dev); + return ret; +} + +static int ti_j721e_ufs_remove(struct platform_device *pdev) +{ + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id ti_j721e_ufs_of_match[] = { + { + .compatible = "ti,j721e-ufs", + }, + { }, +}; + +static struct platform_driver ti_j721e_ufs_driver = { + .probe = ti_j721e_ufs_probe, + .remove = ti_j721e_ufs_remove, + .driver = { + .name = "ti-j721e-ufs", + .of_match_table = ti_j721e_ufs_of_match, + }, +}; +module_platform_driver(ti_j721e_ufs_driver); + +MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); +MODULE_DESCRIPTION("TI UFS host controller glue driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c new file mode 100644 index 000000000000..a81d8cbd542f --- /dev/null +++ b/drivers/ufs/host/ufs-exynos.c @@ -0,0 +1,1630 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS Host Controller driver for Exynos specific extensions + * + * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd. 
+ * Author: Seungwon Jeon <essuuj@gmail.com> + * Author: Alim Akhtar <alim.akhtar@samsung.com> + * + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/mfd/syscon.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <ufs/ufshcd.h> +#include "ufshcd-pltfrm.h" +#include <ufs/ufshci.h> +#include <ufs/unipro.h> + +#include "ufs-exynos.h" + +/* + * Exynos's Vendor specific registers for UFSHCI + */ +#define HCI_TXPRDT_ENTRY_SIZE 0x00 +#define PRDT_PREFECT_EN BIT(31) +#define PRDT_SET_SIZE(x) ((x) & 0x1F) +#define HCI_RXPRDT_ENTRY_SIZE 0x04 +#define HCI_1US_TO_CNT_VAL 0x0C +#define CNT_VAL_1US_MASK 0x3FF +#define HCI_UTRL_NEXUS_TYPE 0x40 +#define HCI_UTMRL_NEXUS_TYPE 0x44 +#define HCI_SW_RST 0x50 +#define UFS_LINK_SW_RST BIT(0) +#define UFS_UNIPRO_SW_RST BIT(1) +#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST) +#define HCI_DATA_REORDER 0x60 +#define HCI_UNIPRO_APB_CLK_CTRL 0x68 +#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) +#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define HCI_GPIO_OUT 0x70 +#define HCI_ERR_EN_PA_LAYER 0x78 +#define HCI_ERR_EN_DL_LAYER 0x7C +#define HCI_ERR_EN_N_LAYER 0x80 +#define HCI_ERR_EN_T_LAYER 0x84 +#define HCI_ERR_EN_DME_LAYER 0x88 +#define HCI_CLKSTOP_CTRL 0xB0 +#define REFCLKOUT_STOP BIT(4) +#define REFCLK_STOP BIT(2) +#define UNIPRO_MCLK_STOP BIT(1) +#define UNIPRO_PCLK_STOP BIT(0) +#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\ + UNIPRO_MCLK_STOP |\ + UNIPRO_PCLK_STOP) +#define HCI_MISC 0xB4 +#define REFCLK_CTRL_EN BIT(7) +#define UNIPRO_PCLK_CTRL_EN BIT(6) +#define UNIPRO_MCLK_CTRL_EN BIT(5) +#define HCI_CORECLK_CTRL_EN BIT(4) +#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ + UNIPRO_PCLK_CTRL_EN |\ + UNIPRO_MCLK_CTRL_EN) +/* Device fatal error */ +#define DFES_ERR_EN BIT(31) +#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ + UIC_DATA_LINK_LAYER_ERROR_PA_INIT) +#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\ + UIC_NETWORK_BAD_DEVICEID_ENC |\ + UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING) +#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\ + UIC_TRANSPORT_UNKNOWN_CPORTID |\ + UIC_TRANSPORT_NO_CONNECTION_RX |\ + UIC_TRANSPORT_BAD_TC) + +/* FSYS UFS Shareability */ +#define UFS_WR_SHARABLE BIT(2) +#define UFS_RD_SHARABLE BIT(1) +#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE) +#define UFS_SHAREABILITY_OFFSET 0x710 + +/* Multi-host registers */ +#define MHCTRL 0xC4 +#define MHCTRL_EN_VH_MASK (0xE) +#define MHCTRL_EN_VH(vh) (vh << 1) +#define PH2VH_MBOX 0xD8 + +#define MH_MSG_MASK (0xFF) + +#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF)) +#define MH_MSG_PH_READY 0x1 +#define MH_MSG_VH_READY 0x2 + +#define ALLOW_INQUIRY BIT(25) +#define ALLOW_MODE_SELECT BIT(24) +#define ALLOW_MODE_SENSE BIT(23) +#define ALLOW_PRE_FETCH GENMASK(22, 21) +#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */ +#define ALLOW_READ_BUFFER BIT(17) +#define ALLOW_READ_CAPACITY GENMASK(16, 15) +#define ALLOW_REPORT_LUNS BIT(14) +#define ALLOW_REQUEST_SENSE BIT(13) +#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7) +#define ALLOW_TEST_UNIT_READY BIT(6) +#define ALLOW_UNMAP BIT(5) +#define ALLOW_VERIFY BIT(4) +#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */ + +#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \ + ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \ + ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \ + ALLOW_READ_CAPACITY | 
ALLOW_REPORT_LUNS | \ + ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \ + ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \ + ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL) + +#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C +#define HCI_MH_IID_IN_TASK_TAG 0X308 + +#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC) + +enum { + UNIPRO_L1_5 = 0,/* PHY Adapter */ + UNIPRO_L2, /* Data Link */ + UNIPRO_L3, /* Network */ + UNIPRO_L4, /* Transport */ + UNIPRO_DME, /* DME */ +}; + +/* + * UNIPRO registers + */ +#define UNIPRO_COMP_VERSION 0x000 +#define UNIPRO_DME_PWR_REQ 0x090 +#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094 +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098 +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC + +/* + * UFS Protector registers + */ +#define UFSPRSECURITY 0x010 +#define NSSMU BIT(14) +#define UFSPSBEGIN0 0x200 +#define UFSPSEND0 0x204 +#define UFSPSLUN0 0x208 +#define UFSPSCTRL0 0x20C + +#define CNTR_DIV_VAL 40 + +static struct exynos_ufs_drv_data exynos_ufs_drvs; +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en); +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en); + +static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, true); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, false); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc_save( + struct exynos_ufs *ufs, u32 *val) +{ + *val = hci_readl(ufs, HCI_MISC); + exynos_ufs_auto_ctrl_hcc(ufs, false); +} + +static inline void exynos_ufs_auto_ctrl_hcc_restore( + struct exynos_ufs *ufs, u32 *val) +{ + hci_writel(ufs, *val, HCI_MISC); +} + +static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, true); +} + +static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, false); +} + +static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + return 0; +} + +static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + /* IO Coherency setting */ + if (ufs->sysreg) { + return regmap_update_bits(ufs->sysreg, + ufs->shareability_reg_offset, + UFS_SHARABLE, UFS_SHARABLE); + } + + attr->tx_dif_p_nsec = 3200000; + + return 0; +} + +static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + /* Enable Virtual Host #1 */ + ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL); + /* Default VH Transfer permissions */ + hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH); + /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */ + hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG); + + return 0; +} + +static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + u32 tx_line_reset_period, rx_line_reset_period; + + rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC; + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, 
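
/*
 * Illustrative sketch, standalone and not part of the patch: the pre_link
 * hook above turns a line-reset time into a cycle count of the UniPro main
 * clock (dividing by NSEC_PER_MSEC makes the *_LINE_RESET_TIME constants
 * effectively microseconds), then programs the 24-bit result one byte per
 * VND_*_LINERESET attribute. The 26 MHz mclk below is an example value.
 */
#include <stdio.h>

#define NSEC_PER_MSEC 1000000UL

int main(void)
{
	unsigned long mclk_rate = 26000000UL;	/* example 26 MHz mclk */
	unsigned long tx_time = 3200;		/* TX_LINE_RESET_TIME */
	unsigned long period = (tx_time * mclk_rate) / NSEC_PER_MSEC;

	/* MSB first, matching the three dme_set() calls above */
	printf("PVALUE2=0x%02lx PVALUE1=0x%02lx PVALUE0=0x%02lx\n",
	       (period >> 16) & 0xFF, (period >> 8) & 0xFF, period & 0xFF);
	return 0;	/* prints PVALUE2=0x01 PVALUE1=0x45 PVALUE0=0x00 */
}
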
UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i), + (rx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i), + (rx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i), + (rx_line_reset_period) & 0xFF); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + /* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i), + 0x02); + + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i), + (tx_line_reset_period >> 16) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i), + (tx_line_reset_period >> 8) & 0xFF); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i), + (tx_line_reset_period) & 0xFF); + + /* TX PWM Gear Capability / PWM_G1_ONLY */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1); + } + + ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0); + + ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000); + + return 0; +} + +static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + + /* PACP_PWR_req and delivered to the remote DME */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000); + + return 0; +} + +static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u32 enabled_vh; + + enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK; + + /* Send physical host ready message to virtual hosts */ + ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX); + + return 0; +} + +static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00); + } + exynos_ufs_disable_ov_tm(hba); + + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1); + udelay(1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1); + udelay(1600); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val); + + return 0; +} + +static int exynos7_ufs_post_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i), + 
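
/*
 * Illustrative sketch, not part of the patch: the raw per-lane attributes
 * written in exynos7_ufs_pre_link()/post_link() above are only writable
 * while the override/test-mode window is open, so each burst of writes is
 * bracketed by exynos_ufs_enable_ov_tm()/exynos_ufs_disable_ov_tm() (both
 * defined in ufs-exynos.h further below). A hypothetical helper following
 * the same discipline:
 */
static void example_write_raw_lane_attr(struct ufs_hba *hba, int lane, u32 val)
{
	exynos_ufs_enable_ov_tm(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, lane), val);
	exynos_ufs_disable_ov_tm(hba);
}
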
TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000))); + } + exynos_ufs_disable_ov_tm(hba); + + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8); + exynos_ufs_disable_dbg_mode(hba); + + return 0; +} + +static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE); + + return 0; +} + +static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1); + + if (lanes == 1) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1); + exynos_ufs_disable_dbg_mode(hba); + } + + return 0; +} + +/* + * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w + * Control should be disabled in the below cases + * - Before host controller S/W reset + * - Access to UFS protector's register + */ +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en) +{ + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) + hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC); + else + hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC); +} + +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en) +{ + u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL); + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) { + hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC); + hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + } else { + hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC); + } +} + +static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct list_head *head = &hba->clk_list_head; + struct ufs_clk_info *clki; + unsigned long pclk_rate; + u32 f_min, f_max; + u8 div = 0; + int ret = 0; + + if (list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR(clki->clk)) { + if (!strcmp(clki->name, "core_clk")) + ufs->clk_hci_core = clki->clk; + else if (!strcmp(clki->name, "sclk_unipro_main")) + ufs->clk_unipro_main = clki->clk; + } + } + + if (!ufs->clk_hci_core || !ufs->clk_unipro_main) { + dev_err(hba->dev, "failed to get clk info\n"); + ret = -EINVAL; + goto out; + } + + ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main); + pclk_rate = clk_get_rate(ufs->clk_hci_core); + f_min = ufs->pclk_avail_min; + f_max = ufs->pclk_avail_max; + + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + do { + pclk_rate /= (div + 1); + + if (pclk_rate <= f_max) + break; + div++; + } while (pclk_rate >= f_min); + } + + if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) { + dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate); + ret = -EINVAL; + goto out; + } + + ufs->pclk_rate = pclk_rate; + ufs->pclk_div = div; + +out: + return ret; +} + +static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs) +{ + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + u32 val; + + val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL); + hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div), + HCI_UNIPRO_APB_CLK_CTRL); + } +} + +static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + ufshcd_dme_set(hba, + UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl); +} + +static void exynos_ufs_calc_pwm_clk_div(struct 
exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + const unsigned int div = 30, mult = 20; + const unsigned long pwm_min = 3 * 1000 * 1000; + const unsigned long pwm_max = 9 * 1000 * 1000; + const int divs[] = {32, 16, 8, 4}; + unsigned long clk = 0, _clk, clk_period; + int i = 0, clk_idx = -1; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + for (i = 0; i < ARRAY_SIZE(divs); i++) { + _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div); + if (_clk >= pwm_min && _clk <= pwm_max) { + if (_clk > clk) { + clk_idx = i; + clk = _clk; + } + } + } + + if (clk_idx == -1) { + ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx); + dev_err(hba->dev, + "failed to decide pwm clock divider, will not change\n"); + } + + attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK; +} + +long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period) +{ + const int precise = 10; + long pclk_rate = ufs->pclk_rate; + long clk_period, fraction; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate; + + return (period * precise) / ((clk_period * precise) + fraction); +} + +static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + + t_cfg->tx_linereset_p = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); + t_cfg->tx_linereset_n = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec); + t_cfg->tx_high_z_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec); + t_cfg->tx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec); + t_cfg->tx_gran_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec); + t_cfg->tx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt); + + t_cfg->rx_linereset = + exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec); + t_cfg->rx_hibern8_wait = + exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec); + t_cfg->rx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec); + t_cfg->rx_gran_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec); + t_cfg->rx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt); + t_cfg->rx_stall_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt); +} + +static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + int i; + + exynos_ufs_set_pwm_clk_div(ufs); + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i), + ufs->drv_data->uic_attr->rx_filler_enable); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i), + RX_LINERESET(t_cfg->rx_linereset)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i), + RX_BASE_NVAL_L(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i), + RX_BASE_NVAL_H(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i), + RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i), + RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i), + RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i), + RX_OV_STALL_CNT(t_cfg->rx_stall_cnt)); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, 
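
/*
 * Illustrative sketch, standalone and not part of the patch:
 * exynos_ufs_calc_time_cntr() above converts nanoseconds into PCLK cycles
 * while carrying one extra decimal digit of the clock period ("precise" =
 * 10), so a fractional period such as 7.5 ns is not truncated to 7 ns.
 * The 133 MHz pclk is an example value.
 */
#include <stdio.h>

int main(void)
{
	const long NSEC_PER_SEC = 1000000000L;
	const int precise = 10;
	long pclk_rate = 133000000L;	/* example pclk */
	long period = 20000;		/* e.g. IATOVAL_NSEC */
	long clk_period = NSEC_PER_SEC / pclk_rate;	/* 7 ns, truncated */
	long fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

	/* 20000 ns at ~7.5 ns per cycle -> about 2666 cycles */
	printf("%ld cycles\n",
	       (period * precise) / ((clk_period * precise) + fraction));
	return 0;
}
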
UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i), + TX_LINERESET_P(t_cfg->tx_linereset_p)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i), + TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i), + TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i), + TX_BASE_NVAL_L(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i), + TX_BASE_NVAL_H(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i), + TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i), + TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i), + TX_OV_H8_ENTER_EN | + TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i), + ufs->drv_data->uic_attr->tx_min_activatetime); + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + int i; + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i), + attr->rx_hs_g1_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i), + attr->rx_hs_g2_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i), + attr->rx_hs_g3_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i), + attr->rx_hs_g1_prep_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i), + attr->rx_hs_g2_prep_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i), + attr->rx_hs_g3_prep_sync_len_cap); + } + + if (attr->rx_adv_fine_gran_sup_en == 0) { + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0); + + if (attr->rx_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP, + i), attr->rx_min_actv_time_cap); + + if (attr->rx_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i), + attr->rx_hibern8_time_cap); + } + } else if (attr->rx_adv_fine_gran_sup_en == 1) { + for_each_ufs_rx_lane(ufs, i) { + if (attr->rx_adv_fine_gran_step) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, + i), RX_ADV_FINE_GRAN_STEP( + attr->rx_adv_fine_gran_step)); + + if (attr->rx_adv_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL( + RX_ADV_MIN_ACTIVATETIME_CAP, i), + attr->rx_adv_min_actv_time_cap); + + if (attr->rx_adv_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP, + i), + attr->rx_adv_hibern8_time_cap); + } + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_establish_connt(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + enum { + DEV_ID = 0x00, + PEER_DEV_ID = 0x01, + PEER_CPORT_ID = 0x00, + TRAFFIC_CLASS = 0x00, + }; + + /* allow cport attributes to be set */ + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE); + + /* local unipro attributes */ + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS); + ufshcd_dme_set(hba, 
UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED); +} + +static void exynos_ufs_config_smu(struct exynos_ufs *ufs) +{ + u32 reg, val; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + /* make encryption disabled by default */ + reg = ufsp_readl(ufs, UFSPRSECURITY); + ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY); + ufsp_writel(ufs, 0x0, UFSPSBEGIN0); + ufsp_writel(ufs, 0xffffffff, UFSPSEND0); + ufsp_writel(ufs, 0xff, UFSPSLUN0); + ufsp_writel(ufs, 0xf1, UFSPSCTRL0); + + exynos_ufs_auto_ctrl_hcc_restore(ufs, &val); +} + +static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx); + u32 mask, sync_len; + enum { + SYNC_LEN_G1 = 80 * 1000, /* 80us */ + SYNC_LEN_G2 = 40 * 1000, /* 40us */ + SYNC_LEN_G3 = 20 * 1000, /* 20us */ + }; + int i; + + if (g == 1) + sync_len = SYNC_LEN_G1; + else if (g == 2) + sync_len = SYNC_LEN_G2; + else if (g == 3) + sync_len = SYNC_LEN_G3; + else + return; + + mask = exynos_ufs_calc_time_cntr(ufs, sync_len); + mask = (mask >> 8) & 0xff; + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask); + + exynos_ufs_disable_ov_tm(hba); +} + +static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + struct ufs_dev_params ufs_exynos_cap; + int ret; + + if (!dev_req_params) { + pr_err("%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + ufshcd_init_pwr_dev_param(&ufs_exynos_cap); + + ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap, + dev_max_params, dev_req_params); + if (ret) { + pr_err("%s: failed to determine capabilities\n", __func__); + goto out; + } + + if (ufs->drv_data->pre_pwr_change) + ufs->drv_data->pre_pwr_change(ufs, dev_req_params); + + if (ufshcd_is_hs_mode(dev_req_params)) { + exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params); + + switch (dev_req_params->hs_rate) { + case PA_HS_MODE_A: + case PA_HS_MODE_B: + phy_calibrate(generic_phy); + break; + } + } + + /* setting for three timeout values for traffic class #0 */ + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064); + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224); + ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160); + + return 0; +out: + return ret; +} + +#define PWR_MODE_STR_LEN 64 +static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_req) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx); + int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx); + char pwr_str[PWR_MODE_STR_LEN] = ""; + + /* let default be PWM Gear 1, Lane 1 */ + if (!gear) + gear = 1; + + if (!lanes) + lanes = 1; + + if (ufs->drv_data->post_pwr_change) + ufs->drv_data->post_pwr_change(ufs, pwr_req); + + if ((ufshcd_is_hs_mode(pwr_req))) { + switch (pwr_req->hs_rate) { + case PA_HS_MODE_A: + case PA_HS_MODE_B: + phy_calibrate(generic_phy); + break; + } + + snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d", + "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? 
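
/*
 * Illustrative sketch, standalone and not part of the patch:
 * exynos_ufs_config_sync_pattern_mask() above picks a per-gear SYNC length
 * in nanoseconds, converts it to PCLK cycles, and keeps bits [15:8] as the
 * RX_SYNC_MASK_LENGTH value. With the example 133 MHz pclk used in the
 * sketch after exynos_ufs_calc_time_cntr():
 */
#include <stdio.h>

int main(void)
{
	long cycles = (80000L * 10) / 75;	/* SYNC_LEN_G1 at ~7.5 ns/cycle */
	long mask = (cycles >> 8) & 0xff;	/* keep bits [15:8] */

	printf("RX_SYNC_MASK_LENGTH = 0x%02lx\n", mask);	/* 0x29 */
	return 0;
}
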
"A" : "B", + gear, lanes); + } else { + snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d", + "SLOW", gear, lanes); + } + + dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str); + + return 0; +} + +static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba, + int tag, bool is_scsi_cmd) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE); + + if (is_scsi_cmd) + hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE); + else + hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE); +} + +static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba, + int tag, u8 func) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE); + + switch (func) { + case UFS_ABORT_TASK: + case UFS_QUERY_TASK: + hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + case UFS_ABORT_TASK_SET: + case UFS_CLEAR_TASK_SET: + case UFS_LOGICAL_RESET: + case UFS_QUERY_TASK_SET: + hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + } +} + +static int exynos_ufs_phy_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct phy *generic_phy = ufs->phy; + int ret = 0; + + if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES), + &ufs->avail_ln_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), + &ufs->avail_ln_tx); + WARN(ufs->avail_ln_rx != ufs->avail_ln_tx, + "available data lane is not equal(rx:%d, tx:%d)\n", + ufs->avail_ln_rx, ufs->avail_ln_tx); + } + + phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + ret = phy_init(generic_phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + goto out_exit_phy; + } + + return 0; + +out_exit_phy: + phy_exit(generic_phy); + + return ret; +} + +static void exynos_ufs_config_unipro(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS), + ufs->drv_data->uic_attr->tx_trailingclks); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), + ufs->drv_data->uic_attr->pa_dbg_option_suite); +} + +static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) +{ + switch (index) { + case UNIPRO_L1_5: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER); + break; + case UNIPRO_L2: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER); + break; + case UNIPRO_L3: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER); + break; + case UNIPRO_L4: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER); + break; + case UNIPRO_DME: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER); + break; + } +} + +static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufs) + return 0; + + if (on && status == PRE_CHANGE) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + } else if (!on && status == POST_CHANGE) { + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } + + return 0; +} + +static int exynos_ufs_pre_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + /* hci */ + exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, 
UNIPRO_L2); + exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3); + exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); + exynos_ufs_set_unipro_pclk_div(ufs); + + /* unipro */ + exynos_ufs_config_unipro(ufs); + + /* m-phy */ + exynos_ufs_phy_init(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { + exynos_ufs_config_phy_time_attr(ufs); + exynos_ufs_config_phy_cap_attr(ufs); + } + + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); + + if (ufs->drv_data->pre_link) + ufs->drv_data->pre_link(ufs); + + return 0; +} + +static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs) +{ + u32 val; + + val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL); + hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL); +} + +static int exynos_ufs_post_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + exynos_ufs_establish_connt(ufs); + exynos_ufs_fit_aggr_timeout(ufs); + + hci_writel(ufs, 0xa, HCI_DATA_REORDER); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); + hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); + + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) + ufshcd_dme_set(hba, + UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true); + + if (attr->pa_granularity) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY), + attr->pa_granularity); + exynos_ufs_disable_dbg_mode(hba); + + if (attr->pa_tactivate) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + attr->pa_tactivate); + if (attr->pa_hibern8time && + !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER)) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + attr->pa_hibern8time); + } + + if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + if (!attr->pa_granularity) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &attr->pa_granularity); + if (!attr->pa_hibern8time) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + &attr->pa_hibern8time); + /* + * not wait for HIBERN8 time to exit hibernation + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0); + + if (attr->pa_granularity < 1 || attr->pa_granularity > 6) { + /* Valid range for granularity: 1 ~ 6 */ + dev_warn(hba->dev, + "%s: pa_granularity %d is invalid, assuming backwards compatibility\n", + __func__, + attr->pa_granularity); + attr->pa_granularity = 6; + } + } + + phy_calibrate(generic_phy); + + if (ufs->drv_data->post_link) + ufs->drv_data->post_link(ufs); + + return 0; +} + +static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) +{ + struct device_node *np = dev->of_node; + struct exynos_ufs_uic_attr *attr; + int ret = 0; + + ufs->drv_data = device_get_match_data(dev); + + if (ufs->drv_data && ufs->drv_data->uic_attr) { + attr = ufs->drv_data->uic_attr; + } else { + dev_err(dev, "failed to get uic attributes\n"); + ret = -EINVAL; + goto out; + } + + ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); + if (IS_ERR(ufs->sysreg)) + ufs->sysreg = NULL; + else { + if (of_property_read_u32_index(np, "samsung,sysreg", 1, + &ufs->shareability_reg_offset)) { + dev_warn(dev, "can't get an offset from sysreg. 
Set to default value\n"); + ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET; + } + } + + ufs->pclk_avail_min = PCLK_AVAIL_MIN; + ufs->pclk_avail_max = PCLK_AVAIL_MAX; + + attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN; + attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL; + attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP; + attr->pa_granularity = PA_GRANULARITY_VAL; + attr->pa_tactivate = PA_TACTIVATE_VAL; + attr->pa_hibern8time = PA_HIBERN8TIME_VAL; + +out: + return ret; +} + +static inline void exynos_ufs_priv_init(struct ufs_hba *hba, + struct exynos_ufs *ufs) +{ + ufs->hba = hba; + ufs->opts = ufs->drv_data->opts; + ufs->rx_sel_idx = PA_MAXDATALANES; + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) + ufs->rx_sel_idx = 0; + hba->priv = (void *)ufs; + hba->quirks = ufs->drv_data->quirks; +} + +static int exynos_ufs_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci"); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + /* unipro */ + ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro"); + if (IS_ERR(ufs->reg_unipro)) { + dev_err(dev, "cannot ioremap for unipro register\n"); + return PTR_ERR(ufs->reg_unipro); + } + + /* ufs protector */ + ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp"); + if (IS_ERR(ufs->reg_ufsp)) { + dev_err(dev, "cannot ioremap for ufs protector register\n"); + return PTR_ERR(ufs->reg_ufsp); + } + + ret = exynos_ufs_parse_dt(dev, ufs); + if (ret) { + dev_err(dev, "failed to get dt info.\n"); + goto out; + } + + ufs->phy = devm_phy_get(dev, "ufs-phy"); + if (IS_ERR(ufs->phy)) { + ret = PTR_ERR(ufs->phy); + dev_err(dev, "failed to get ufs-phy\n"); + goto out; + } + + ret = phy_power_on(ufs->phy); + if (ret) + goto phy_off; + + exynos_ufs_priv_init(hba, ufs); + + if (ufs->drv_data->drv_init) { + ret = ufs->drv_data->drv_init(dev, ufs); + if (ret) { + dev_err(dev, "failed to init drv-data\n"); + goto out; + } + } + + ret = exynos_ufs_get_clk_info(ufs); + if (ret) + goto out; + exynos_ufs_specify_phy_time_attr(ufs); + exynos_ufs_config_smu(ufs); + return 0; + +phy_off: + phy_power_off(ufs->phy); +out: + hba->priv = NULL; + return ret; +} + +static int exynos_ufs_host_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + unsigned long timeout = jiffies + msecs_to_jiffies(1); + u32 val; + int ret = 0; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST); + + do { + if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK)) + goto out; + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "timeout host sw-reset\n"); + ret = -ETIMEDOUT; + +out: + exynos_ufs_auto_ctrl_hcc_restore(ufs, &val); + return ret; +} + +static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + hci_writel(ufs, 0 << 0, HCI_GPIO_OUT); + udelay(5); + hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); +} + +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + if (!enter) { + if (ufs->opts & 
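
/*
 * Illustrative sketch, not part of the patch: exynos_ufs_host_reset()
 * above uses the kernel's usual poll-until-deadline shape: arm a jiffies
 * deadline, then re-read the status until the bit clears. A generic form,
 * with a final re-read so a long scheduling gap at the deadline is not
 * misreported (ufs-hisi's hibern8 poll later in this patch re-checks the
 * same way):
 */
static int example_poll_bit_clear(struct exynos_ufs *ufs, u32 reg, u32 mask,
				  unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (!(hci_readl(ufs, reg) & mask))
			return 0;
	} while (time_before(jiffies, timeout));

	/* one last look in case we were scheduled out at the deadline */
	return (hci_readl(ufs, reg) & mask) ? -ETIMEDOUT : 0;
}
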
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + + if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + static const unsigned int granularity_tbl[] = { + 1, 4, 8, 16, 32, 100 + }; + int h8_time = attr->pa_hibern8time * + granularity_tbl[attr->pa_granularity - 1]; + unsigned long us; + s64 delta; + + do { + delta = h8_time - ktime_us_delta(ktime_get(), + ufs->entry_hibern8_t); + if (delta <= 0) + break; + + us = min_t(s64, delta, USEC_PER_MSEC); + if (us >= 10) + usleep_range(us, us + 10); + } while (1); + } + } +} + +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!enter) { + u32 cur_mode = 0; + u32 pwrmode; + + if (ufshcd_is_hs_mode(&ufs->dev_req_params)) + pwrmode = FAST_MODE; + else + pwrmode = SLOW_MODE; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); + if (cur_mode != (pwrmode << 4 | pwrmode)) { + dev_warn(hba->dev, "%s: power mode change\n", __func__); + hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; + hba->pwr_info.pwr_tx = cur_mode & 0xf; + ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + } + + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) + exynos_ufs_establish_connt(ufs); + } else { + ufs->entry_hibern8_t = ktime_get(); + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } +} + +static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + int ret = 0; + + switch (status) { + case PRE_CHANGE: + if (ufs->drv_data->pre_hce_enable) { + ret = ufs->drv_data->pre_hce_enable(ufs); + if (ret) + return ret; + } + + ret = exynos_ufs_host_reset(hba); + if (ret) + return ret; + exynos_ufs_dev_hw_reset(hba); + break; + case POST_CHANGE: + exynos_ufs_calc_pwm_clk_div(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + + if (ufs->drv_data->post_hce_enable) + ret = ufs->drv_data->post_hce_enable(ufs); + + break; + } + + return ret; +} + +static int exynos_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_link(hba); + break; + case POST_CHANGE: + ret = exynos_ufs_post_link(hba); + break; + } + + return ret; +} + +static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params, + dev_req_params); + break; + case POST_CHANGE: + ret = exynos_ufs_post_pwr_mode(hba, dev_req_params); + break; + } + + return ret; +} + +static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme enter, + enum ufs_notify_change_status notify) +{ + switch ((u8)notify) { + case PRE_CHANGE: + exynos_ufs_pre_hibern8(hba, enter); + break; + case POST_CHANGE: + exynos_ufs_post_hibern8(hba, enter); + break; + } +} + +static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (status == PRE_CHANGE) + return 0; + + if (!ufshcd_is_link_active(hba)) + phy_power_off(ufs->phy); + + return 0; +} + +static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op 
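
/*
 * Illustrative sketch, not part of the patch: the hibern8 exit path above
 * must guarantee a minimum residency (pa_hibern8time scaled by the
 * granularity table, in microseconds), so it sleeps in chunks of at most
 * 1 ms and re-checks the remaining budget each pass; residues below 10 us
 * are simply spun away. Reduced to its core:
 */
static void example_wait_min_residency(ktime_t entry, s64 budget_us)
{
	s64 delta;
	unsigned long us;

	do {
		delta = budget_us - ktime_us_delta(ktime_get(), entry);
		if (delta <= 0)
			break;

		us = min_t(s64, delta, USEC_PER_MSEC);
		if (us >= 10)
			usleep_range(us, us + 10);
	} while (1);
}
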
pm_op) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufshcd_is_link_active(hba)) + phy_power_on(ufs->phy); + + exynos_ufs_config_smu(ufs); + + return 0; +} + +static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status == POST_CHANGE) { + ufshcd_set_link_active(hba); + ufshcd_set_ufs_dev_active(hba); + } + + return 0; +} + +static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba) +{ + u32 mbox; + ktime_t start, stop; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS)); + + do { + mbox = ufshcd_readl(hba, PH2VH_MBOX); + /* TODO: Mailbox message protocols between the PH and VHs are + * not implemented yet. This will be supported later + */ + if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY) + return 0; + + usleep_range(40, 50); + } while (ktime_before(ktime_get(), stop)); + + return -ETIME; +} + +static int exynosauto_ufs_vh_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci"); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + ret = exynosauto_ufs_vh_wait_ph_ready(hba); + if (ret) + return ret; + + ufs->drv_data = device_get_match_data(dev); + if (!ufs->drv_data) + return -ENODEV; + + exynos_ufs_priv_init(hba, ufs); + + return 0; +} + +static struct ufs_hba_variant_ops ufs_hba_exynos_ops = { + .name = "exynos_ufs", + .init = exynos_ufs_init, + .hce_enable_notify = exynos_ufs_hce_enable_notify, + .link_startup_notify = exynos_ufs_link_startup_notify, + .pwr_change_notify = exynos_ufs_pwr_change_notify, + .setup_clocks = exynos_ufs_setup_clocks, + .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req, + .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req, + .hibern8_notify = exynos_ufs_hibern8_notify, + .suspend = exynos_ufs_suspend, + .resume = exynos_ufs_resume, +}; + +static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = { + .name = "exynosauto_ufs_vh", + .init = exynosauto_ufs_vh_init, + .link_startup_notify = exynosauto_ufs_vh_link_startup_notify, +}; + +static int exynos_ufs_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops; + const struct exynos_ufs_drv_data *drv_data = + device_get_match_data(dev); + + if (drv_data && drv_data->vops) + vops = drv_data->vops; + + err = ufshcd_pltfrm_init(pdev, vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +static int exynos_ufs_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static struct exynos_ufs_uic_attr exynos7_uic_attr = { + .tx_trailingclks = 0x10, + .tx_dif_p_nsec = 3000000, /* unit: ns */ + .tx_dif_n_nsec = 1000000, /* unit: ns */ + .tx_high_z_cnt_nsec = 20000, /* unit: ns */ + .tx_base_unit_nsec = 100000, /* unit: ns */ + .tx_gran_unit_nsec = 4000, /* unit: ns */ + .tx_sleep_cnt = 1000, /* unit: ns */ + .tx_min_activatetime = 0xa, + .rx_filler_enable = 0x2, + .rx_dif_p_nsec = 1000000, /* unit: ns */ + .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ + .rx_base_unit_nsec = 100000, /* unit: 
ns */ + .rx_gran_unit_nsec = 4000, /* unit: ns */ + .rx_sleep_cnt = 1280, /* unit: ns */ + .rx_stall_cnt = 320, /* unit: ns */ + .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), + .pa_dbg_option_suite = 0x30103, +}; + +static struct exynos_ufs_drv_data exynosauto_ufs_drvs = { + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, + .drv_init = exynosauto_ufs_drv_init, + .post_hce_enable = exynosauto_ufs_post_hce_enable, + .pre_link = exynosauto_ufs_pre_link, + .pre_pwr_change = exynosauto_ufs_pre_pwr_change, + .post_pwr_change = exynosauto_ufs_post_pwr_change, +}; + +static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = { + .vops = &ufs_hba_exynosauto_vh_ops, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCD_QUIRK_BROKEN_UIC_CMD | + UFSHCD_QUIRK_SKIP_PH_CONFIGURATION | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, + .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX, +}; + +static struct exynos_ufs_drv_data exynos_ufs_drvs = { + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | + UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | + UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | + UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING | + UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE, + .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | + EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | + EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, + .drv_init = exynos7_ufs_drv_init, + .pre_link = exynos7_ufs_pre_link, + .post_link = exynos7_ufs_post_link, + .pre_pwr_change = exynos7_ufs_pre_pwr_change, + .post_pwr_change = exynos7_ufs_post_pwr_change, +}; + +static const struct of_device_id exynos_ufs_of_match[] = { + { .compatible = "samsung,exynos7-ufs", + .data = &exynos_ufs_drvs }, + { .compatible = "samsung,exynosautov9-ufs", + .data = &exynosauto_ufs_drvs }, + { .compatible = "samsung,exynosautov9-ufs-vh", + .data = &exynosauto_ufs_vh_drvs }, + {}, +}; + +static const struct dev_pm_ops exynos_ufs_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver exynos_ufs_pltform = { + .probe = exynos_ufs_probe, + .remove = exynos_ufs_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "exynos-ufshc", + .pm = &exynos_ufs_pm_ops, + .of_match_table = of_match_ptr(exynos_ufs_of_match), + }, +}; +module_platform_driver(exynos_ufs_pltform); + +MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>"); +MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>"); +MODULE_DESCRIPTION("Exynos UFS HCI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h new file mode 100644 index 000000000000..0b0a3d530ca6 --- 
/dev/null +++ b/drivers/ufs/host/ufs-exynos.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host Controller driver for Exynos specific extensions + * + * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd. + * + */ + +#ifndef _UFS_EXYNOS_H_ +#define _UFS_EXYNOS_H_ + +/* + * UNIPRO registers + */ +#define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150 + +/* + * MIBs for PA debug registers + */ +#define PA_DBG_CLK_PERIOD 0x9514 +#define PA_DBG_TXPHY_CFGUPDT 0x9518 +#define PA_DBG_RXPHY_CFGUPDT 0x9519 +#define PA_DBG_MODE 0x9529 +#define PA_DBG_SKIP_RESET_PHY 0x9539 +#define PA_DBG_OV_TM 0x9540 +#define PA_DBG_SKIP_LINE_RESET 0x9541 +#define PA_DBG_LINE_RESET_REQ 0x9543 +#define PA_DBG_OPTION_SUITE 0x9564 +#define PA_DBG_OPTION_SUITE_DYN 0x9565 + +/* + * MIBs for Transport Layer debug registers + */ +#define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001 + +/* + * Exynos MPHY attributes + */ +#define TX_LINERESET_N_VAL 0x0277 +#define TX_LINERESET_N(v) (((v) >> 10) & 0xFF) +#define TX_LINERESET_P_VAL 0x027D +#define TX_LINERESET_P(v) (((v) >> 12) & 0xFF) +#define TX_OV_SLEEP_CNT_TIMER 0x028E +#define TX_OV_H8_ENTER_EN (1 << 7) +#define TX_OV_SLEEP_CNT(v) (((v) >> 5) & 0x7F) +#define TX_HIGH_Z_CNT_11_08 0x028C +#define TX_HIGH_Z_CNT_H(v) (((v) >> 8) & 0xF) +#define TX_HIGH_Z_CNT_07_00 0x028D +#define TX_HIGH_Z_CNT_L(v) ((v) & 0xFF) +#define TX_BASE_NVAL_07_00 0x0293 +#define TX_BASE_NVAL_L(v) ((v) & 0xFF) +#define TX_BASE_NVAL_15_08 0x0294 +#define TX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF) +#define TX_GRAN_NVAL_07_00 0x0295 +#define TX_GRAN_NVAL_L(v) ((v) & 0xFF) +#define TX_GRAN_NVAL_10_08 0x0296 +#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) + +#define VND_TX_CLK_PRD 0xAA +#define VND_TX_CLK_PRD_EN 0xA9 +#define VND_TX_LINERESET_PVALUE0 0xAD +#define VND_TX_LINERESET_PVALUE1 0xAC +#define VND_TX_LINERESET_PVALUE2 0xAB + +#define TX_LINE_RESET_TIME 3200 + +#define VND_RX_CLK_PRD 0x12 +#define VND_RX_CLK_PRD_EN 0x11 +#define VND_RX_LINERESET_VALUE0 0x1D +#define VND_RX_LINERESET_VALUE1 0x1C +#define VND_RX_LINERESET_VALUE2 0x1B + +#define RX_LINE_RESET_TIME 1000 + +#define RX_FILLER_ENABLE 0x0316 +#define RX_FILLER_EN (1 << 1) +#define RX_LINERESET_VAL 0x0317 +#define RX_LINERESET(v) (((v) >> 12) & 0xFF) +#define RX_LCC_IGNORE 0x0318 +#define RX_SYNC_MASK_LENGTH 0x0321 +#define RX_HIBERN8_WAIT_VAL_BIT_20_16 0x0331 +#define RX_HIBERN8_WAIT_VAL_BIT_15_08 0x0332 +#define RX_HIBERN8_WAIT_VAL_BIT_07_00 0x0333 +#define RX_OV_SLEEP_CNT_TIMER 0x0340 +#define RX_OV_SLEEP_CNT(v) (((v) >> 6) & 0x1F) +#define RX_OV_STALL_CNT_TIMER 0x0341 +#define RX_OV_STALL_CNT(v) (((v) >> 4) & 0xFF) +#define RX_BASE_NVAL_07_00 0x0355 +#define RX_BASE_NVAL_L(v) ((v) & 0xFF) +#define RX_BASE_NVAL_15_08 0x0354 +#define RX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF) +#define RX_GRAN_NVAL_07_00 0x0353 +#define RX_GRAN_NVAL_L(v) ((v) & 0xFF) +#define RX_GRAN_NVAL_10_08 0x0352 +#define RX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) + +#define CMN_PWM_CLK_CTRL 0x0402 +#define PWM_CLK_CTRL_MASK 0x3 + +#define IATOVAL_NSEC 20000 /* unit: ns */ +#define UNIPRO_PCLK_PERIOD(ufs) (NSEC_PER_SEC / ufs->pclk_rate) + +struct exynos_ufs; + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define RX_ADV_FINE_GRAN_SUP_EN 0x1 +#define RX_ADV_FINE_GRAN_STEP_VAL 0x3 +#define RX_ADV_MIN_ACTV_TIME_CAP 0x9 + +#define PA_GRANULARITY_VAL 0x6 +#define PA_TACTIVATE_VAL 0x3 +#define PA_HIBERN8TIME_VAL 0x20 + +#define PCLK_AVAIL_MIN 70000000 +#define PCLK_AVAIL_MAX 167000000 + +struct exynos_ufs_uic_attr { + /* TX Attributes */ + 
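
/*
 * Illustrative sketch, standalone and not part of the patch: the *_VAL
 * attribute addresses above come with extractor macros that slice one
 * computed counter into the byte- or nibble-wide M-PHY attributes, e.g.
 * TX_HIGH_Z_CNT_H()/TX_HIGH_Z_CNT_L():
 */
#include <stdio.h>

#define TX_HIGH_Z_CNT_H(v)	(((v) >> 8) & 0xF)
#define TX_HIGH_Z_CNT_L(v)	((v) & 0xFF)

int main(void)
{
	unsigned int cnt = 0x5A3;	/* example 12-bit counter */

	printf("high=0x%x low=0x%x\n",
	       TX_HIGH_Z_CNT_H(cnt), TX_HIGH_Z_CNT_L(cnt));
	return 0;	/* prints high=0x5 low=0xa3 */
}
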
unsigned int tx_trailingclks; + unsigned int tx_dif_p_nsec; + unsigned int tx_dif_n_nsec; + unsigned int tx_high_z_cnt_nsec; + unsigned int tx_base_unit_nsec; + unsigned int tx_gran_unit_nsec; + unsigned int tx_sleep_cnt; + unsigned int tx_min_activatetime; + /* RX Attributes */ + unsigned int rx_filler_enable; + unsigned int rx_dif_p_nsec; + unsigned int rx_hibern8_wait_nsec; + unsigned int rx_base_unit_nsec; + unsigned int rx_gran_unit_nsec; + unsigned int rx_sleep_cnt; + unsigned int rx_stall_cnt; + unsigned int rx_hs_g1_sync_len_cap; + unsigned int rx_hs_g2_sync_len_cap; + unsigned int rx_hs_g3_sync_len_cap; + unsigned int rx_hs_g1_prep_sync_len_cap; + unsigned int rx_hs_g2_prep_sync_len_cap; + unsigned int rx_hs_g3_prep_sync_len_cap; + /* Common Attributes */ + unsigned int cmn_pwm_clk_ctrl; + /* Internal Attributes */ + unsigned int pa_dbg_option_suite; + /* Changeable Attributes */ + unsigned int rx_adv_fine_gran_sup_en; + unsigned int rx_adv_fine_gran_step; + unsigned int rx_min_actv_time_cap; + unsigned int rx_hibern8_time_cap; + unsigned int rx_adv_min_actv_time_cap; + unsigned int rx_adv_hibern8_time_cap; + unsigned int pa_granularity; + unsigned int pa_tactivate; + unsigned int pa_hibern8time; +}; + +struct exynos_ufs_drv_data { + const struct ufs_hba_variant_ops *vops; + struct exynos_ufs_uic_attr *uic_attr; + unsigned int quirks; + unsigned int opts; + /* SoC's specific operations */ + int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*pre_link)(struct exynos_ufs *ufs); + int (*post_link)(struct exynos_ufs *ufs); + int (*pre_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); + int (*post_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); + int (*pre_hce_enable)(struct exynos_ufs *ufs); + int (*post_hce_enable)(struct exynos_ufs *ufs); +}; + +struct ufs_phy_time_cfg { + u32 tx_linereset_p; + u32 tx_linereset_n; + u32 tx_high_z_cnt; + u32 tx_base_n_val; + u32 tx_gran_n_val; + u32 tx_sleep_cnt; + u32 rx_linereset; + u32 rx_hibern8_wait; + u32 rx_base_n_val; + u32 rx_gran_n_val; + u32 rx_sleep_cnt; + u32 rx_stall_cnt; +}; + +struct exynos_ufs { + struct ufs_hba *hba; + struct phy *phy; + void __iomem *reg_hci; + void __iomem *reg_unipro; + void __iomem *reg_ufsp; + struct clk *clk_hci_core; + struct clk *clk_unipro_main; + struct clk *clk_apb; + u32 pclk_rate; + u32 pclk_div; + u32 pclk_avail_min; + u32 pclk_avail_max; + unsigned long mclk_rate; + int avail_ln_rx; + int avail_ln_tx; + int rx_sel_idx; + struct ufs_pa_layer_attr dev_req_params; + struct ufs_phy_time_cfg t_cfg; + ktime_t entry_hibern8_t; + const struct exynos_ufs_drv_data *drv_data; + struct regmap *sysreg; + u32 shareability_reg_offset; + + u32 opts; +#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0) +#define EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB BIT(1) +#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2) +#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3) +#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4) +#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5) +}; + +#define for_each_ufs_rx_lane(ufs, i) \ + for (i = (ufs)->rx_sel_idx; \ + i < (ufs)->rx_sel_idx + (ufs)->avail_ln_rx; i++) +#define for_each_ufs_tx_lane(ufs, i) \ + for (i = 0; i < (ufs)->avail_ln_tx; i++) + +#define EXYNOS_UFS_MMIO_FUNC(name) \ +static inline void name##_writel(struct exynos_ufs *ufs, u32 val, u32 reg)\ +{ \ + writel(val, ufs->reg_##name + reg); \ +} \ + \ +static inline u32 name##_readl(struct exynos_ufs *ufs, u32 reg) \ +{ \ + return readl(ufs->reg_##name + reg); \ +} + 
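
/*
 * Illustrative note, not part of the patch: EXYNOS_UFS_MMIO_FUNC(name)
 * above stamps out a writel/readl accessor pair per register block, so the
 * three instantiations that follow yield hci_*, unipro_* and ufsp_*
 * helpers. EXYNOS_UFS_MMIO_FUNC(hci), for instance, expands to:
 */
static inline void hci_writel(struct exynos_ufs *ufs, u32 val, u32 reg)
{
	writel(val, ufs->reg_hci + reg);
}

static inline u32 hci_readl(struct exynos_ufs *ufs, u32 reg)
{
	return readl(ufs->reg_hci + reg);
}
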
+EXYNOS_UFS_MMIO_FUNC(hci); +EXYNOS_UFS_MMIO_FUNC(unipro); +EXYNOS_UFS_MMIO_FUNC(ufsp); +#undef EXYNOS_UFS_MMIO_FUNC + +long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long); + +static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), true); +} + +static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), false); +} + +static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), true); +} + +static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), false); +} + +#endif /* _UFS_EXYNOS_H_ */ diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c new file mode 100644 index 000000000000..2eed13bc82ca --- /dev/null +++ b/drivers/ufs/host/ufs-hisi.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon Hixxxx UFS Driver + * + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + */ + +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <ufs/ufshcd.h> +#include "ufshcd-pltfrm.h" +#include <ufs/unipro.h> +#include "ufs-hisi.h" +#include <ufs/ufshci.h> +#include <ufs/ufs_quirks.h> + +static int ufs_hisi_check_hibern8(struct ufs_hba *hba) +{ + int err = 0; + u32 tx_fsm_val_0 = 0; + u32 tx_fsm_val_1 = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); + + do { + err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), + &tx_fsm_val_0); + err |= ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1); + if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 && + tx_fsm_val_1 == TX_FSM_HIBERN8)) + break; + + /* sleep for max. 200us */ + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + /* + * we might have scheduled out for long during polling so + * check the state again. 
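+	 * If the timeout expired while we were scheduled out, this final
+	 * read decides whether an error is actually reported.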
+	 */
+	if (time_after(jiffies, timeout)) {
+		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+					&tx_fsm_val_0);
+		err |= ufshcd_dme_get(hba,
+			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+	}
+
+	if (err) {
+		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+			__func__, err);
+	} else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
+		   tx_fsm_val_1 != TX_FSM_HIBERN8) {
+		err = -1;
+		dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+			__func__, tx_fsm_val_0, tx_fsm_val_1);
+	}
+
+	return err;
+}
+
+static void ufs_hisi_clk_init(struct ufs_hba *hba)
+{
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+	if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+		mdelay(1);
+	/* use abb clk */
+	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
+	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+	/* open mphy ref clk */
+	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hisi_soc_init(struct ufs_hba *hba)
+{
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+	u32 reg;
+
+	if (!IS_ERR(host->rst))
+		reset_control_assert(host->rst);
+
+	/* HC_PSW powerup */
+	ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+	udelay(10);
+	/* notify PWR ready */
+	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+		UFS_DEVICE_RESET_CTRL);
+
+	reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+	reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+	/* set cfg clk freq */
+	ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+	/* set ref clk freq */
+	ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+	/* bypass ufs clk gate */
+	ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+		CLOCK_GATE_BYPASS);
+	ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+	/* open psw clk */
+	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+	/* disable ufshc iso */
+	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+	/* disable phy iso */
+	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+	/* notify that isolation is disabled */
+	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+
+	/* disable lp_reset_n */
+	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+	mdelay(1);
+
+	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+		UFS_DEVICE_RESET_CTRL);
+
+	msleep(20);
+
+	/*
+	 * Enable the linereset recovery fix and the rx_reset/tx_reset beat,
+	 * and enable the ref_clk_en override (bit 5) with an override value
+	 * of 1 (bit 4), using the corresponding mask bits.
+	 */
+	ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+	if (!IS_ERR(host->rst))
+		reset_control_deassert(host->rst);
+}
+
+static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
+{
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+	int err;
+	uint32_t value;
+	uint32_t reg;
+
+	/* Unipro VS_mphy_disable */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+	/* PA_HSSeries */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+	/* MPHY CBRATESEL */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+	/* MPHY CBOVRCTRL2 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+	/* MPHY CBOVRCTRL3 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* MPHY CBOVRCTRL4 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+		/* MPHY CBOVRCTRL5 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+	}
+
+	/* Unipro VS_MphyCfgUpdt */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+	/* MPHY RXOVRCTRL4 rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+	/* MPHY RXOVRCTRL4 rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+	/* MPHY RXOVRCTRL5 rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+	/* MPHY RXOVRCTRL5 rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+	/* MPHY RXSQCONTROL rx0 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+	/* MPHY RXSQCONTROL rx1 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+	/* Unipro VS_MphyCfgUpdt */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+	} else {
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	}
+
+	/* Gear3 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+	/* Gear3 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+	/* Gear2 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+	/* Gear2 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+	/* Gear1 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+	/* Gear1 Synclength */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+	/* Thibernate Tx */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+	/* Thibernate Tx */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+	/* Unipro VS_mphy_disable */
+	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+	if (value != 0x1)
+		dev_info(hba->dev,
+			"Warning! 
Unipro VS_mphy_disable is 0x%x\n", value);
+
+	/* Unipro VS_mphy_disable */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+	err = ufs_hisi_check_hibern8(hba);
+	if (err)
+		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
+
+	if (!(host->caps & UFS_HISI_CAP_PHY10nm))
+		ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+	/* disable auto H8 */
+	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+	reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+	ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	/* Unipro PA_Local_TX_LCC_Enable */
+	ufshcd_disable_host_tx_lcc(hba);
+	/* close Unipro VS_Mk2ExtnSupport */
+	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+	if (value != 0) {
+		/* warn if the disable did not take effect */
+		dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+	}
+
+	return err;
+}
+
+static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
+{
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+	/* Unipro DL_AFC0CreditThreshold */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+	/* Unipro DL_TC0OutAckThreshold */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+	/* Unipro DL_TC0TXFCThreshold */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+	/* stop bypassing the ufs clk gate */
+	ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+		CLOCK_GATE_BYPASS);
+	ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
+		UFS_SYSCTRL);
+
+	/* select received symbol cnt */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
+	/* reset counter0 and enable */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
+
+	return 0;
+}
+
+static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status)
+{
+	int err = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		err = ufs_hisi_link_startup_pre_change(hba);
+		break;
+	case POST_CHANGE:
+		err = ufs_hisi_link_startup_post_change(hba);
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
+{
+	ufshcd_init_pwr_dev_param(hisi_param);
+}
+
+static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
+{
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/*
+		 * The Boston platform needs SaveConfigTime set to 0x13 and
+		 * the sync lengths changed to their maximum values.
+		 */
+		/* VS_DebugSaveConfigTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
+		/* g1 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
+		/* g2 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
+		/* g3 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
+		/* PA_Hibern8Time */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
+		/* PA_Tactivate */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
+	}
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
+		pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
+		/* VS_DebugSaveConfigTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
+		/* sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
+	}
+
+	/* update */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+	/* PA_TxSkip */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+	/*PA_PWRModeUserData0 = 8191, default is 0*/
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+	/*PA_PWRModeUserData1 = 65535, default is 0*/
+	ufshcd_dme_set(hba, 
UIC_ARG_MIB(0x15b1), 65535); + /*PA_PWRModeUserData2 = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767); + /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191); + /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535); + /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767); + /*PA_PWRModeUserData3 = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191); + /*PA_PWRModeUserData4 = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535); + /*PA_PWRModeUserData5 = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767); + /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191); + /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535); + /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); +} + +static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_dev_params ufs_hisi_cap; + int ret = 0; + + if (!dev_req_params) { + dev_err(hba->dev, + "%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + switch (status) { + case PRE_CHANGE: + ufs_hisi_set_dev_cap(&ufs_hisi_cap); + ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap, + dev_max_params, dev_req_params); + if (ret) { + dev_err(hba->dev, + "%s: failed to determine capabilities\n", __func__); + goto out; + } + + ufs_hisi_pwr_change_pre_change(hba); + break; + case POST_CHANGE: + break; + default: + ret = -EINVAL; + break; + } +out: + return ret; +} + +static int ufs_hisi_suspend_prepare(struct device *dev) +{ + /* RPM and SPM are different. 
Refer ufs_hisi_suspend() */ + return __ufshcd_suspend_prepare(dev, false); +} + +static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (status == PRE_CHANGE) + return 0; + + if (pm_op == UFS_RUNTIME_PM) + return 0; + + if (host->in_suspend) { + WARN_ON(1); + return 0; + } + + ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); + udelay(10); + /* set ref_dig_clk override of PHY PCS to 0 */ + ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL); + + host->in_suspend = true; + + return 0; +} + +static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (!host->in_suspend) + return 0; + + /* set ref_dig_clk override of PHY PCS to 1 */ + ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL); + udelay(10); + ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); + + host->in_suspend = false; + return 0; +} + +static int ufs_hisi_get_resource(struct ufs_hisi_host *host) +{ + struct device *dev = host->hba->dev; + struct platform_device *pdev = to_platform_device(dev); + + /* get resource of ufs sys ctrl */ + host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl); +} + +static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) +{ + hba->rpm_lvl = UFS_PM_LVL_1; + hba->spm_lvl = UFS_PM_LVL_3; +} + +/** + * ufs_hisi_init_common + * @hba: host controller instance + */ +static int ufs_hisi_init_common(struct ufs_hba *hba) +{ + int err = 0; + struct device *dev = hba->dev; + struct ufs_hisi_host *host; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->hba = hba; + ufshcd_set_variant(hba, host); + + host->rst = devm_reset_control_get(dev, "rst"); + if (IS_ERR(host->rst)) { + dev_err(dev, "%s: failed to get reset control\n", __func__); + err = PTR_ERR(host->rst); + goto error; + } + + ufs_hisi_set_pm_lvl(hba); + + err = ufs_hisi_get_resource(host); + if (err) + goto error; + + return 0; + +error: + ufshcd_set_variant(hba, NULL); + return err; +} + +static int ufs_hi3660_init(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + + ret = ufs_hisi_init_common(hba); + if (ret) { + dev_err(dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); + + return 0; +} + +static int ufs_hi3670_init(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_hisi_host *host; + + ret = ufs_hisi_init_common(hba); + if (ret) { + dev_err(dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); + + /* Add cap for 10nm PHY variant on HI3670 SoC */ + host = ufshcd_get_variant(hba); + host->caps |= UFS_HISI_CAP_PHY10nm; + + return 0; +} + +static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { + .name = "hi3660", + .init = ufs_hi3660_init, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, + .suspend = ufs_hisi_suspend, + .resume = ufs_hisi_resume, +}; + +static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { + .name = "hi3670", + .init = ufs_hi3670_init, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, + .suspend = ufs_hisi_suspend, + .resume = ufs_hisi_resume, +}; + +static const struct 
of_device_id ufs_hisi_of_match[] = { + { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops }, + { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops }, + {}, +}; + +MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); + +static int ufs_hisi_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + + of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node); + + return ufshcd_pltfrm_init(pdev, of_id->data); +} + +static int ufs_hisi_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + ufshcd_remove(hba); + return 0; +} + +static const struct dev_pm_ops ufs_hisi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufs_hisi_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_hisi_pltform = { + .probe = ufs_hisi_probe, + .remove = ufs_hisi_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-hisi", + .pm = &ufs_hisi_pm_ops, + .of_match_table = of_match_ptr(ufs_hisi_of_match), + }, +}; +module_platform_driver(ufs_hisi_pltform); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ufshcd-hisi"); +MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver"); diff --git a/drivers/ufs/host/ufs-hisi.h b/drivers/ufs/host/ufs-hisi.h new file mode 100644 index 000000000000..5a90c0f4e90c --- /dev/null +++ b/drivers/ufs/host/ufs-hisi.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017, HiSilicon. All rights reserved. + */ + +#ifndef UFS_HISI_H_ +#define UFS_HISI_H_ + +#define HBRN8_POLL_TOUT_MS 1000 + +/* + * ufs sysctrl specific define + */ +#define PSW_POWER_CTRL (0x04) +#define PHY_ISO_EN (0x08) +#define HC_LP_CTRL (0x0C) +#define PHY_CLK_CTRL (0x10) +#define PSW_CLK_CTRL (0x14) +#define CLOCK_GATE_BYPASS (0x18) +#define RESET_CTRL_EN (0x1C) +#define UFS_SYSCTRL (0x5C) +#define UFS_DEVICE_RESET_CTRL (0x60) + +#define BIT_UFS_PSW_ISO_CTRL (1 << 16) +#define BIT_UFS_PSW_MTCMOS_EN (1 << 0) +#define BIT_UFS_REFCLK_ISO_EN (1 << 16) +#define BIT_UFS_PHY_ISO_CTRL (1 << 0) +#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16) +#define BIT_SYSCTRL_PWR_READY (1 << 8) +#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24) +#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8) +#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF) +#define UFS_FREQ_CFG_CLK (0x39) +#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4) +#define MASK_UFS_CLK_GATE_BYPASS (0x3F) +#define BIT_SYSCTRL_LP_RESET_N (1 << 0) +#define BIT_UFS_REFCLK_SRC_SEl (1 << 0) +#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16) +#define MASK_UFS_DEVICE_RESET (0x1 << 16) +#define BIT_UFS_DEVICE_RESET (0x1) + +/* + * M-TX Configuration Attributes for Hixxxx + */ +#define MPHY_TX_FSM_STATE 0x41 +#define TX_FSM_HIBERN8 0x1 + +/* + * Hixxxx UFS HC specific Registers + */ +enum { + UFS_REG_OCPTHRTL = 0xc0, + UFS_REG_OOCPR = 0xc4, + + UFS_REG_CDACFG = 0xd0, + UFS_REG_CDATX1 = 0xd4, + UFS_REG_CDATX2 = 0xd8, + UFS_REG_CDARX1 = 0xdc, + UFS_REG_CDARX2 = 0xe0, + UFS_REG_CDASTA = 0xe4, + + UFS_REG_LBMCFG = 0xf0, + UFS_REG_LBMSTA = 0xf4, + UFS_REG_UFSMODE = 0xf8, + + UFS_REG_HCLKDIV = 0xfc, +}; + +/* AHIT - Auto-Hibernate Idle Timer */ +#define UFS_AHIT_AH8ITV_MASK 0x3FF + +/* REG UFS_REG_OCPTHRTL definition */ +#define UFS_HCLKDIV_NORMAL_VALUE 0xE4 + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define UFS_HISI_CAP_RESERVED BIT(0) +#define UFS_HISI_CAP_PHY10nm BIT(1) + +struct ufs_hisi_host { + 
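+	/* back-pointer to the controller instance this variant data belongs to */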
struct ufs_hba *hba; + void __iomem *ufs_sys_ctrl; + + struct reset_control *rst; + + uint64_t caps; + + bool in_suspend; +}; + +#define ufs_sys_ctrl_writel(host, val, reg) \ + writel((val), (host)->ufs_sys_ctrl + (reg)) +#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg)) +#define ufs_sys_ctrl_set_bits(host, mask, reg) \ + ufs_sys_ctrl_writel( \ + (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg)) +#define ufs_sys_ctrl_clr_bits(host, mask, reg) \ + ufs_sys_ctrl_writel((host), \ + ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \ + (reg)) + +#endif /* UFS_HISI_H_ */ diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h new file mode 100644 index 000000000000..7e010848dc99 --- /dev/null +++ b/drivers/ufs/host/ufs-mediatek-trace.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 MediaTek Inc. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ufs_mtk + +#if !defined(_TRACE_EVENT_UFS_MEDIATEK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENT_UFS_MEDIATEK_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(ufs_mtk_event, + TP_PROTO(unsigned int type, unsigned int data), + TP_ARGS(type, data), + + TP_STRUCT__entry( + __field(unsigned int, type) + __field(unsigned int, data) + ), + + TP_fast_assign( + __entry->type = type; + __entry->data = data; + ), + + TP_printk("ufs:event=%u data=%u", + __entry->type, __entry->data) + ); +#endif + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/ufs/host +#define TRACE_INCLUDE_FILE ufs-mediatek-trace +#include <trace/define_trace.h> diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c new file mode 100644 index 000000000000..beabc3ccd30b --- /dev/null +++ b/drivers/ufs/host/ufs-mediatek.c @@ -0,0 +1,1246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 MediaTek Inc. 
+ * Authors: + * Stanley Chu <stanley.chu@mediatek.com> + * Peter Wang <peter.wang@mediatek.com> + */ + +#include <linux/arm-smccc.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/sched/clock.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +#include <ufs/ufshcd.h> +#include "ufshcd-pltfrm.h" +#include <ufs/ufs_quirks.h> +#include <ufs/unipro.h> +#include "ufs-mediatek.h" + +#define CREATE_TRACE_POINTS +#include "ufs-mediatek-trace.h" + +#define ufs_mtk_smc(cmd, val, res) \ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ + cmd, val, 0, 0, 0, 0, 0, &(res)) + +#define ufs_mtk_va09_pwr_ctrl(res, on) \ + ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res) + +#define ufs_mtk_crypto_ctrl(res, enable) \ + ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res) + +#define ufs_mtk_ref_clk_notify(on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) + +static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_MICRON, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = "H9HQ21AFAMZDAR", + .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES }, + {} +}; + +static const struct of_device_id ufs_mtk_of_match[] = { + { .compatible = "mediatek,mt8183-ufshci" }, + {}, +}; + +static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE); +} + +static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); +} + +static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); +} + +static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) +{ + u32 tmp; + + if (enable) { + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + tmp = tmp | + (1 << RX_SYMBOL_CLK_GATE_EN) | + (1 << SYS_CLK_GATE_EN) | + (1 << TX_CLK_GATE_EN); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp); + tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp); + } else { + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) | + (1 << SYS_CLK_GATE_EN) | + (1 << TX_CLK_GATE_EN)); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + + ufshcd_dme_get(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp); + tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE); + ufshcd_dme_set(hba, + UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp); + } +} + +static void ufs_mtk_crypto_enable(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_crypto_ctrl(res, 1); + if (res.a0) { + dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n", + __func__, res.a0); + hba->caps &= ~UFSHCD_CAP_CRYPTO; + } +} + +static void ufs_mtk_host_reset(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + 
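+	/* Assert all three resets, hold them briefly, then release in reverse order */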
reset_control_assert(host->hci_reset);
+	reset_control_assert(host->crypto_reset);
+	reset_control_assert(host->unipro_reset);
+
+	usleep_range(100, 110);
+
+	reset_control_deassert(host->unipro_reset);
+	reset_control_deassert(host->crypto_reset);
+	reset_control_deassert(host->hci_reset);
+}
+
+static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
+				       struct reset_control **rc,
+				       char *str)
+{
+	*rc = devm_reset_control_get(hba->dev, str);
+	if (IS_ERR(*rc)) {
+		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
+			 str, PTR_ERR(*rc));
+		*rc = NULL;
+	}
+}
+
+static void ufs_mtk_init_reset(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	ufs_mtk_init_reset_control(hba, &host->hci_reset,
+				   "hci_rst");
+	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
+				   "unipro_rst");
+	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
+				   "crypto_rst");
+}
+
+static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
+				     enum ufs_notify_change_status status)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	if (status == PRE_CHANGE) {
+		if (host->unipro_lpm) {
+			hba->vps->hba_enable_delay_us = 0;
+		} else {
+			hba->vps->hba_enable_delay_us = 600;
+			ufs_mtk_host_reset(hba);
+		}
+
+		if (hba->caps & UFSHCD_CAP_CRYPTO)
+			ufs_mtk_crypto_enable(hba);
+
+		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
+			ufshcd_writel(hba, 0,
+				      REG_AUTO_HIBERNATE_IDLE_TIMER);
+			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
+			hba->ahit = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	int err = 0;
+
+	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
+
+	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
+		/*
+		 * The UFS driver may be probed before the phy driver;
+		 * in that case, return -EPROBE_DEFER so that probing is
+		 * retried once the phy driver is available.
+		 */
+		err = -EPROBE_DEFER;
+		dev_info(dev,
+			 "%s: required phy hasn't been probed yet. err = %d\n",
+			 __func__, err);
+	} else if (IS_ERR(host->mphy)) {
+		err = PTR_ERR(host->mphy);
+		if (err != -ENODEV) {
+			dev_info(dev, "%s: PHY get failed %d\n", __func__,
+				 err);
+		}
+	}
+
+	if (err)
+		host->mphy = NULL;
+	/*
+	 * Allow unbound mphy because not every platform needs specific
+	 * mphy control.
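+	 * Treat -ENODEV from the phy framework as "no mphy provided"
+	 * rather than as a fatal error.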
+ */ + if (err == -ENODEV) + err = 0; + + return err; +} + +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + ktime_t timeout, time_checked; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + if (on) { + ufs_mtk_ref_clk_notify(on, res); + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + ufshcd_delay_us(host->ref_clk_gating_wait_us, 10); + ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US); + do { + time_checked = ktime_get(); + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until ack bit equals to req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + usleep_range(100, 200); + } while (ktime_before(time_checked, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (on) + ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10); + else + ufs_mtk_ref_clk_notify(on, res); + + return 0; +} + +static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, + u16 gating_us) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (hba->dev_info.clk_gating_wait_us) { + host->ref_clk_gating_wait_us = + hba->dev_info.clk_gating_wait_us; + } else { + host->ref_clk_gating_wait_us = gating_us; + } + + host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US; +} + +static void ufs_mtk_dbg_sel(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + if (((host->ip_ver >> 16) & 0xFF) >= 0x36) { + ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); + ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); + ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); + ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); + ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); + } else { + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + } +} + +static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, + unsigned long retry_ms) +{ + u64 timeout, time_checked; + u32 val, sm; + bool wait_idle; + + /* cannot use plain ktime_get() in suspend */ + timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL; + + /* wait a specific time after check base */ + udelay(10); + wait_idle = false; + + do { + time_checked = ktime_get_mono_fast_ns(); + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + + sm = val & 0x1f; + + /* + * if state is in H8 enter and H8 enter confirm + * wait until return to idle state. + */ + if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) { + wait_idle = true; + udelay(50); + continue; + } else if (!wait_idle) + break; + + if (wait_idle && (sm == VS_HCE_BASE)) + break; + } while (time_checked < timeout); + + if (wait_idle && sm != VS_HCE_BASE) + dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); +} + +static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, + unsigned long max_wait_ms) +{ + ktime_t timeout, time_checked; + u32 val; + + timeout = ktime_add_ms(ktime_get(), max_wait_ms); + do { + time_checked = ktime_get(); + ufs_mtk_dbg_sel(hba); + val = ufshcd_readl(hba, REG_UFS_PROBE); + val = val >> 28; + + if (val == state) + return 0; + + /* Sleep for max. 
200us */ + usleep_range(100, 200); + } while (ktime_before(time_checked, timeout)); + + if (val == state) + return 0; + + return -ETIMEDOUT; +} + +static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct phy *mphy = host->mphy; + struct arm_smccc_res res; + int ret = 0; + + if (!mphy || !(on ^ host->mphy_powered_on)) + return 0; + + if (on) { + if (ufs_mtk_is_va09_supported(hba)) { + ret = regulator_enable(host->reg_va09); + if (ret < 0) + goto out; + /* wait 200 us to stablize VA09 */ + usleep_range(200, 210); + ufs_mtk_va09_pwr_ctrl(res, 1); + } + phy_power_on(mphy); + } else { + phy_power_off(mphy); + if (ufs_mtk_is_va09_supported(hba)) { + ufs_mtk_va09_pwr_ctrl(res, 0); + ret = regulator_disable(host->reg_va09); + if (ret < 0) + goto out; + } + } +out: + if (ret) { + dev_info(hba->dev, + "failed to %s va09: %d\n", + on ? "enable" : "disable", + ret); + } else { + host->mphy_powered_on = on; + } + + return ret; +} + +static int ufs_mtk_get_host_clk(struct device *dev, const char *name, + struct clk **clk_out) +{ + struct clk *clk; + int err = 0; + + clk = devm_clk_get(dev, name); + if (IS_ERR(clk)) + err = PTR_ERR(clk); + else + *clk_out = clk; + + return err; +} + +static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_crypt_cfg *cfg; + struct regulator *reg; + int volt, ret; + + if (!ufs_mtk_is_boost_crypt_enabled(hba)) + return; + + cfg = host->crypt; + volt = cfg->vcore_volt; + reg = cfg->reg_vcore; + + ret = clk_prepare_enable(cfg->clk_crypt_mux); + if (ret) { + dev_info(hba->dev, "clk_prepare_enable(): %d\n", + ret); + return; + } + + if (boost) { + ret = regulator_set_voltage(reg, volt, INT_MAX); + if (ret) { + dev_info(hba->dev, + "failed to set vcore to %d\n", volt); + goto out; + } + + ret = clk_set_parent(cfg->clk_crypt_mux, + cfg->clk_crypt_perf); + if (ret) { + dev_info(hba->dev, + "failed to set clk_crypt_perf\n"); + regulator_set_voltage(reg, 0, INT_MAX); + goto out; + } + } else { + ret = clk_set_parent(cfg->clk_crypt_mux, + cfg->clk_crypt_lp); + if (ret) { + dev_info(hba->dev, + "failed to set clk_crypt_lp\n"); + goto out; + } + + ret = regulator_set_voltage(reg, 0, INT_MAX); + if (ret) { + dev_info(hba->dev, + "failed to set vcore to MIN\n"); + } + } +out: + clk_disable_unprepare(cfg->clk_crypt_mux); +} + +static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name, + struct clk **clk) +{ + int ret; + + ret = ufs_mtk_get_host_clk(hba->dev, name, clk); + if (ret) { + dev_info(hba->dev, "%s: failed to get %s: %d", __func__, + name, ret); + } + + return ret; +} + +static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_crypt_cfg *cfg; + struct device *dev = hba->dev; + struct regulator *reg; + u32 volt; + + host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)), + GFP_KERNEL); + if (!host->crypt) + goto disable_caps; + + reg = devm_regulator_get_optional(dev, "dvfsrc-vcore"); + if (IS_ERR(reg)) { + dev_info(dev, "failed to get dvfsrc-vcore: %ld", + PTR_ERR(reg)); + goto disable_caps; + } + + if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min", + &volt)) { + dev_info(dev, "failed to get boost-crypt-vcore-min"); + goto disable_caps; + } + + cfg = host->crypt; + if (ufs_mtk_init_host_clk(hba, "crypt_mux", + &cfg->clk_crypt_mux)) + goto disable_caps; + + if (ufs_mtk_init_host_clk(hba, "crypt_lp", + &cfg->clk_crypt_lp)) + 
goto disable_caps; + + if (ufs_mtk_init_host_clk(hba, "crypt_perf", + &cfg->clk_crypt_perf)) + goto disable_caps; + + cfg->reg_vcore = reg; + cfg->vcore_volt = volt; + host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE; + +disable_caps: + return; +} + +static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + host->reg_va09 = regulator_get(hba->dev, "va09"); + if (IS_ERR(host->reg_va09)) + dev_info(hba->dev, "failed to get va09"); + else + host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL; +} + +static void ufs_mtk_init_host_caps(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct device_node *np = hba->dev->of_node; + + if (of_property_read_bool(np, "mediatek,ufs-boost-crypt")) + ufs_mtk_init_boost_crypt(hba); + + if (of_property_read_bool(np, "mediatek,ufs-support-va09")) + ufs_mtk_init_va09_pwr_ctrl(hba); + + if (of_property_read_bool(np, "mediatek,ufs-disable-ah8")) + host->caps |= UFS_MTK_CAP_DISABLE_AH8; + + if (of_property_read_bool(np, "mediatek,ufs-broken-vcc")) + host->caps |= UFS_MTK_CAP_BROKEN_VCC; + + dev_info(hba->dev, "caps: 0x%x", host->caps); +} + +static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + ufs_mtk_boost_crypt(hba, up); + ufs_mtk_setup_ref_clk(hba, up); + + if (up) + phy_power_on(host->mphy); + else + phy_power_off(host->mphy); +} + +/** + * ufs_mtk_setup_clocks - enables/disable clocks + * @hba: host controller instance + * @on: If true, enable clocks else disable them. + * @status: PRE_CHANGE or POST_CHANGE notify + * + * Returns 0 on success, non-zero on failure. + */ +static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, + enum ufs_notify_change_status status) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + bool clk_pwr_off = false; + int ret = 0; + + /* + * In case ufs_mtk_init() is not yet done, simply ignore. + * This ufs_mtk_setup_clocks() shall be called from + * ufs_mtk_init() after init is done. + */ + if (!host) + return 0; + + if (!on && status == PRE_CHANGE) { + if (ufshcd_is_link_off(hba)) { + clk_pwr_off = true; + } else if (ufshcd_is_link_hibern8(hba) || + (!ufshcd_can_hibern8_during_gating(hba) && + ufshcd_is_auto_hibern8_enabled(hba))) { + /* + * Gate ref-clk and poweroff mphy if link state is in + * OFF or Hibern8 by either Auto-Hibern8 or + * ufshcd_link_state_transition(). + */ + ret = ufs_mtk_wait_link_state(hba, + VS_LINK_HIBERN8, + 15); + if (!ret) + clk_pwr_off = true; + } + + if (clk_pwr_off) + ufs_mtk_scale_perf(hba, false); + } else if (on && status == POST_CHANGE) { + ufs_mtk_scale_perf(hba, true); + } + + return ret; +} + +static void ufs_mtk_get_controller_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int ret, ver = 0; + + if (host->hw_ver.major) + return; + + /* Set default (minimum) version anyway */ + host->hw_ver.major = 2; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver); + if (!ret) { + if (ver >= UFS_UNIPRO_VER_1_8) { + host->hw_ver.major = 3; + /* + * Fix HCI version for some platforms with + * incorrect version + */ + if (hba->ufs_version < ufshci_version(3, 0)) + hba->ufs_version = ufshci_version(3, 0); + } + } +} + +static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) +{ + return hba->ufs_version; +} + +/** + * ufs_mtk_init - find other essential mmio bases + * @hba: host controller instance + * + * Binds PHY with controller and powers up PHY enabling clocks + * and regulators. 
+ * + * Returns -EPROBE_DEFER if binding fails, returns negative error + * on phy power up failure and returns zero on success. + */ +static int ufs_mtk_init(struct ufs_hba *hba) +{ + const struct of_device_id *id; + struct device *dev = hba->dev; + struct ufs_mtk_host *host; + int err = 0; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + err = -ENOMEM; + dev_info(dev, "%s: no memory for mtk ufs host\n", __func__); + goto out; + } + + host->hba = hba; + ufshcd_set_variant(hba, host); + + id = of_match_device(ufs_mtk_of_match, dev); + if (!id) { + err = -EINVAL; + goto out; + } + + /* Initialize host capability */ + ufs_mtk_init_host_caps(hba); + + err = ufs_mtk_bind_mphy(hba); + if (err) + goto out_variant_clear; + + ufs_mtk_init_reset(hba); + + /* Enable runtime autosuspend */ + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + /* Enable clock-gating */ + hba->caps |= UFSHCD_CAP_CLK_GATING; + + /* Enable inline encryption */ + hba->caps |= UFSHCD_CAP_CRYPTO; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); + + if (host->caps & UFS_MTK_CAP_DISABLE_AH8) + hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* + * ufshcd_vops_init() is invoked after + * ufshcd_setup_clock(true) in ufshcd_hba_init() thus + * phy clock setup is skipped. + * + * Enable phy clocks specifically here. + */ + ufs_mtk_mphy_power_on(hba, true); + ufs_mtk_setup_clocks(hba, true, POST_CHANGE); + + host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + goto out; + +out_variant_clear: + ufshcd_set_variant(hba, NULL); +out: + return err; +} + +static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_dev_params host_cap; + int ret; + + ufshcd_init_pwr_dev_param(&host_cap); + host_cap.hs_rx_gear = UFS_HS_G4; + host_cap.hs_tx_gear = UFS_HS_G4; + + ret = ufshcd_get_pwr_dev_param(&host_cap, + dev_max_params, + dev_req_params); + if (ret) { + pr_info("%s: failed to determine capabilities\n", + __func__); + } + + if (host->hw_ver.major >= 3) { + ret = ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } + + return ret; +} + +static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status stage, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + int ret = 0; + + switch (stage) { + case PRE_CHANGE: + ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, + dev_req_params); + break; + case POST_CHANGE: + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm) +{ + int ret; + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + + ret = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + lpm ? 1 : 0); + if (!ret || !lpm) { + /* + * Forcibly set as non-LPM mode if UIC commands is failed + * to use default hba_enable_delay_us value for re-enabling + * the host. 
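+	 * (if entering LPM fails, unipro_lpm stays false, so the longer
+	 * default delay is used when the host is re-enabled)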
+ */ + host->unipro_lpm = lpm; + } + + return ret; +} + +static int ufs_mtk_pre_link(struct ufs_hba *hba) +{ + int ret; + u32 tmp; + + ufs_mtk_get_controller_version(hba); + + ret = ufs_mtk_unipro_set_lpm(hba, false); + if (ret) + return ret; + + /* + * Setting PA_Local_TX_LCC_Enable to 0 before link startup + * to make sure that both host and device TX LCC are disabled + * once link startup is completed. + */ + ret = ufshcd_disable_host_tx_lcc(hba); + if (ret) + return ret; + + /* disable deep stall */ + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); + if (ret) + return ret; + + tmp &= ~(1 << 6); + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); + + return ret; +} + +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + u32 ah_ms; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) + ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, + hba->ahit); + else + ah_ms = 10; + ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); + } +} + +static int ufs_mtk_post_link(struct ufs_hba *hba) +{ + /* enable unipro clock gating feature */ + ufs_mtk_cfg_unipro_cg(hba, true); + + /* will be configured during probe hba */ + if (ufshcd_is_auto_hibern8_supported(hba)) + hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); + + ufs_mtk_setup_clk_gating(hba); + + return 0; +} + +static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status stage) +{ + int ret = 0; + + switch (stage) { + case PRE_CHANGE: + ret = ufs_mtk_pre_link(hba); + break; + case POST_CHANGE: + ret = ufs_mtk_post_link(hba); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + /* disable hba before device reset */ + ufshcd_hba_stop(hba); + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * The reset signal is active low. UFS devices shall detect + * more than or equal to 1us of positive or negative RST_n + * pulse width. + * + * To be on safe side, keep the reset low for at least 10us. 
+	 */
+	usleep_range(10, 15);
+
+	ufs_mtk_device_reset_ctrl(1, res);
+
+	/* Some devices may need time to respond to rst_n */
+	usleep_range(10000, 15000);
+
+	dev_info(hba->dev, "device reset done\n");
+
+	return 0;
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_hba_enable(hba);
+	if (err)
+		return err;
+
+	err = ufs_mtk_unipro_set_lpm(hba, false);
+	if (err)
+		return err;
+
+	err = ufshcd_uic_hibern8_exit(hba);
+	if (!err)
+		ufshcd_set_link_active(hba);
+	else
+		return err;
+
+	err = ufshcd_make_hba_operational(hba);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufs_mtk_unipro_set_lpm(hba, true);
+	if (err) {
+		/* Resume UniPro state for following error recovery */
+		ufs_mtk_unipro_set_lpm(hba, false);
+		return err;
+	}
+
+	return 0;
+}
+
+static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
+		return;
+
+	if (lpm && !hba->vreg_info.vcc->enabled)
+		regulator_set_mode(hba->vreg_info.vccq2->reg,
+				   REGULATOR_MODE_IDLE);
+	else if (!lpm)
+		regulator_set_mode(hba->vreg_info.vccq2->reg,
+				   REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+	int ret;
+
+	/* disable auto-hibern8 */
+	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	/* wait for the host to return to idle once auto-hibern8 is off */
+	ufs_mtk_wait_idle_state(hba, 5);
+
+	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+	if (ret)
+		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+			   enum ufs_notify_change_status status)
+{
+	int err;
+	struct arm_smccc_res res;
+
+	if (status == PRE_CHANGE) {
+		if (!ufshcd_is_auto_hibern8_supported(hba))
+			return 0;
+		ufs_mtk_auto_hibern8_disable(hba);
+		return 0;
+	}
+
+	if (ufshcd_is_link_hibern8(hba)) {
+		err = ufs_mtk_link_set_lpm(hba);
+		if (err)
+			goto fail;
+	}
+
+	if (!ufshcd_is_link_active(hba)) {
+		/*
+		 * Make sure no error will be returned to prevent
+		 * ufshcd_suspend() re-enabling regulators while vreg is still
+		 * in low-power mode.
+		 */
+		ufs_mtk_vreg_set_lpm(hba, true);
+		err = ufs_mtk_mphy_power_on(hba, false);
+		if (err)
+			goto fail;
+	}
+
+	if (ufshcd_is_link_off(hba))
+		ufs_mtk_device_reset_ctrl(0, res);
+
+	return 0;
+fail:
+	/*
+	 * Forcibly set the link to the OFF state to trigger
+	 * ufshcd_host_reset_and_restore() in ufshcd_suspend(),
+	 * which performs a complete host reset.
+ */ + ufshcd_set_link_off(hba); + return -EAGAIN; +} + +static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + int err; + + err = ufs_mtk_mphy_power_on(hba, true); + if (err) + goto fail; + + ufs_mtk_vreg_set_lpm(hba, false); + + if (ufshcd_is_link_hibern8(hba)) { + err = ufs_mtk_link_set_hpm(hba); + if (err) + goto fail; + } + + return 0; +fail: + return ufshcd_link_recovery(hba); +} + +static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl "); + + ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); + + ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, + REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4, + "MPHY Ctrl "); + + /* Direct debugging information to REG_MTK_PROBE */ + ufs_mtk_dbg_sel(hba); + ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); +} + +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u16 mid = dev_info->wmanufacturerid; + + if (mid == UFS_VENDOR_SAMSUNG) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); + + /* + * Decide waiting time before gating reference clock and + * after ungating reference clock according to vendors' + * requirements. + */ + if (mid == UFS_VENDOR_SAMSUNG) + ufs_mtk_setup_ref_clk_wait_us(hba, 1); + else if (mid == UFS_VENDOR_SKHYNIX) + ufs_mtk_setup_ref_clk_wait_us(hba, 30); + else if (mid == UFS_VENDOR_TOSHIBA) + ufs_mtk_setup_ref_clk_wait_us(hba, 100); + else + ufs_mtk_setup_ref_clk_wait_us(hba, + REFCLK_DEFAULT_WAIT_US); + + return 0; +} + +static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); + + if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && + (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { + hba->vreg_info.vcc->always_on = true; + /* + * VCC will be kept always-on thus we don't + * need any delay during regulator operations + */ + hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); + } +} + +static void ufs_mtk_event_notify(struct ufs_hba *hba, + enum ufs_event_type evt, void *data) +{ + unsigned int val = *(u32 *)data; + + trace_ufs_mtk_event(evt, val); +} + +/* + * struct ufs_hba_mtk_vops - UFS MTK specific variant operations + * + * The variant operations configure the necessary controller and PHY + * handshake during initialization. 
+ */ +static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { + .name = "mediatek.ufshci", + .init = ufs_mtk_init, + .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version, + .setup_clocks = ufs_mtk_setup_clocks, + .hce_enable_notify = ufs_mtk_hce_enable_notify, + .link_startup_notify = ufs_mtk_link_startup_notify, + .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, + .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks, + .suspend = ufs_mtk_suspend, + .resume = ufs_mtk_resume, + .dbg_register_dump = ufs_mtk_dbg_register_dump, + .device_reset = ufs_mtk_device_reset, + .event_notify = ufs_mtk_event_notify, +}; + +/** + * ufs_mtk_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Return zero for success and non-zero for failure + */ +static int ufs_mtk_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + struct device_node *reset_node; + struct platform_device *reset_pdev; + struct device_link *link; + + reset_node = of_find_compatible_node(NULL, NULL, + "ti,syscon-reset"); + if (!reset_node) { + dev_notice(dev, "find ti,syscon-reset fail\n"); + goto skip_reset; + } + reset_pdev = of_find_device_by_node(reset_node); + if (!reset_pdev) { + dev_notice(dev, "find reset_pdev fail\n"); + goto skip_reset; + } + link = device_link_add(dev, &reset_pdev->dev, + DL_FLAG_AUTOPROBE_CONSUMER); + put_device(&reset_pdev->dev); + if (!link) { + dev_notice(dev, "add reset device_link fail\n"); + goto skip_reset; + } + /* supplier is not probed */ + if (link->status == DL_STATE_DORMANT) { + err = -EPROBE_DEFER; + goto out; + } + +skip_reset: + /* perform generic probe */ + err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops); + +out: + if (err) + dev_info(dev, "probe failed %d\n", err); + + of_node_put(reset_node); + return err; +} + +/** + * ufs_mtk_remove - set driver_data of the device to NULL + * @pdev: pointer to platform device handle + * + * Always return 0 + */ +static int ufs_mtk_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static const struct dev_pm_ops ufs_mtk_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_mtk_pltform = { + .probe = ufs_mtk_probe, + .remove = ufs_mtk_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-mtk", + .pm = &ufs_mtk_pm_ops, + .of_match_table = ufs_mtk_of_match, + }, +}; + +MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>"); +MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek UFS Host Driver"); +MODULE_LICENSE("GPL v2"); + +module_platform_driver(ufs_mtk_pltform); diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h new file mode 100644 index 000000000000..414dca86c09f --- /dev/null +++ b/drivers/ufs/host/ufs-mediatek.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. 
+ */ + +#ifndef _UFS_MEDIATEK_H +#define _UFS_MEDIATEK_H + +#include <linux/bitops.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_MTK_IP_VER 0x2240 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 +#define REG_UFS_DEBUG_SEL_B0 0x22D0 +#define REG_UFS_DEBUG_SEL_B1 0x22D4 +#define REG_UFS_DEBUG_SEL_B2 0x22D8 +#define REG_UFS_DEBUG_SEL_B3 0x22DC + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_US 3000 +#define REFCLK_DEFAULT_WAIT_US 32 + +/* + * Other attributes + */ +#define VS_DEBUGCLOCKENABLE 0xD0A1 +#define VS_SAVEPOWERCONTROL 0xD0A6 +#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 + +/* + * Vendor specific link state + */ +enum { + VS_LINK_DISABLED = 0, + VS_LINK_DOWN = 1, + VS_LINK_UP = 2, + VS_LINK_HIBERN8 = 3, + VS_LINK_LOST = 4, + VS_LINK_CFG = 5, +}; + +/* + * Vendor specific host controller state + */ +enum { + VS_HCE_RESET = 0, + VS_HCE_BASE = 1, + VS_HCE_OOCPR_WAIT = 2, + VS_HCE_DME_RESET = 3, + VS_HCE_MIDDLE = 4, + VS_HCE_DME_ENABLE = 5, + VS_HCE_DEFAULTS = 6, + VS_HIB_IDLEEN = 7, + VS_HIB_ENTER = 8, + VS_HIB_ENTER_CONF = 9, + VS_HIB_MIDDLE = 10, + VS_HIB_WAITTIMER = 11, + VS_HIB_EXIT_CONF = 12, + VS_HIB_EXIT = 13, +}; + +/* + * SiP commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) + +/* + * VS_DEBUGCLOCKENABLE + */ +enum { + TX_SYMBOL_CLK_REQ_FORCE = 5, +}; + +/* + * VS_SAVEPOWERCONTROL + */ +enum { + RX_SYMBOL_CLK_GATE_EN = 0, + SYS_CLK_GATE_EN = 2, + TX_CLK_GATE_EN = 3, +}; + +/* + * Host capability + */ +enum ufs_mtk_host_caps { + UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, + UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, + UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, + UFS_MTK_CAP_BROKEN_VCC = 1 << 3, +}; + +struct ufs_mtk_crypt_cfg { + struct regulator *reg_vcore; + struct clk *clk_crypt_perf; + struct clk *clk_crypt_mux; + struct clk *clk_crypt_lp; + int vcore_volt; +}; + +struct ufs_mtk_hw_ver { + u8 step; + u8 minor; + u8 major; +}; + +struct ufs_mtk_host { + struct phy *mphy; + struct regulator *reg_va09; + struct reset_control *hci_reset; + struct reset_control *unipro_reset; + struct reset_control *crypto_reset; + struct ufs_hba *hba; + struct ufs_mtk_crypt_cfg *crypt; + struct ufs_mtk_hw_ver hw_ver; + enum ufs_mtk_host_caps caps; + bool mphy_powered_on; + bool unipro_lpm; + bool ref_clk_enabled; + u16 ref_clk_ungating_wait_us; + u16 ref_clk_gating_wait_us; + u32 ip_ver; +}; + +#endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c new file mode 100644 index 000000000000..745e48ec598f --- /dev/null +++ b/drivers/ufs/host/ufs-qcom-ice.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm ICE (Inline Crypto Engine) support. + * + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. 
+ * Copyright 2019 Google LLC + */ + +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/qcom_scm.h> + +#include "ufs-qcom.h" + +#define AES_256_XTS_KEY_SIZE 64 + +/* QCOM ICE registers */ + +#define QCOM_ICE_REG_CONTROL 0x0000 +#define QCOM_ICE_REG_RESET 0x0004 +#define QCOM_ICE_REG_VERSION 0x0008 +#define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_REG_PARAMETERS_1 0x0014 +#define QCOM_ICE_REG_PARAMETERS_2 0x0018 +#define QCOM_ICE_REG_PARAMETERS_3 0x001C +#define QCOM_ICE_REG_PARAMETERS_4 0x0020 +#define QCOM_ICE_REG_PARAMETERS_5 0x0024 + +/* QCOM ICE v3.X only */ +#define QCOM_ICE_GENERAL_ERR_STTS 0x0040 +#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030 +#define QCOM_ICE_GENERAL_ERR_MASK 0x0044 + +/* QCOM ICE v2.X only */ +#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040 +#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044 + +#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C +#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060 +#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064 +#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068 +#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C +#define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_REG_BYPASS_STATUS 0x0074 +#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 +#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004 +#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010 +#define QCOM_ICE_REG_TEST_BUS_REG 0x1014 + +/* BIST ("built-in self-test"?) status flags */ +#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000 + +#define QCOM_ICE_FUSE_SETTING_MASK 0x1 +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 + +#define qcom_ice_writel(host, val, reg) \ + writel((val), (host)->ice_mmio + (reg)) +#define qcom_ice_readl(host, reg) \ + readl((host)->ice_mmio + (reg)) + +static bool qcom_ice_supported(struct ufs_qcom_host *host) +{ + struct device *dev = host->hba->dev; + u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION); + int major = regval >> 24; + int minor = (regval >> 16) & 0xFF; + int step = regval & 0xFFFF; + + /* For now this driver only supports ICE version 3. */ + if (major != 3) { + dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", + major, minor, step); + return false; + } + + dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", + major, minor, step); + + /* If fuses are blown, ICE might not work in the standard way. 
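+	 * If any of these override fuses are set, report ICE as unsupported
+	 * so that inline encryption is disabled.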
*/ + regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING); + if (regval & (QCOM_ICE_FUSE_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { + dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); + return false; + } + return true; +} + +int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + struct ufs_hba *hba = host->hba; + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + int err; + + if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & + MASK_CRYPTO_SUPPORT)) + return 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); + if (!res) { + dev_warn(dev, "ICE registers not found\n"); + goto disable; + } + + if (!qcom_scm_ice_available()) { + dev_warn(dev, "ICE SCM interface not found\n"); + goto disable; + } + + host->ice_mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(host->ice_mmio)) { + err = PTR_ERR(host->ice_mmio); + dev_err(dev, "Failed to map ICE registers; err=%d\n", err); + return err; + } + + if (!qcom_ice_supported(host)) + goto disable; + + return 0; + +disable: + dev_warn(dev, "Disabling inline encryption support\n"); + hba->caps &= ~UFSHCD_CAP_CRYPTO; + return 0; +} + +static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host) +{ + u32 regval; + + regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); + /* + * Enable low power mode sequence + * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 + */ + regval |= 0x7000; + qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); +} + +static void qcom_ice_optimization_enable(struct ufs_qcom_host *host) +{ + u32 regval; + + /* ICE Optimizations Enable Sequence */ + regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); + regval |= 0xD807100; + /* ICE HPG requires delay before writing */ + udelay(5); + qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); + udelay(5); +} + +int ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) + return 0; + qcom_ice_low_power_mode_enable(host); + qcom_ice_optimization_enable(host); + return ufs_qcom_ice_resume(host); +} + +/* Poll until all BIST bits are reset */ +static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host) +{ + int count; + u32 reg; + + for (count = 0; count < 100; count++) { + reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS); + if (!(reg & QCOM_ICE_BIST_STATUS_MASK)) + break; + udelay(50); + } + if (reg) + return -ETIMEDOUT; + return 0; +} + +int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + int err; + + if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) + return 0; + + err = qcom_ice_wait_bist_status(host); + if (err) { + dev_err(host->hba->dev, "BIST status error (%d)\n", err); + return err; + } + return 0; +} + +/* + * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires + * vendor-specific SCM calls for this; it doesn't support the standard way. + */ +int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + union ufs_crypto_cap_entry cap; + union { + u8 bytes[AES_256_XTS_KEY_SIZE]; + u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; + } key; + int i; + int err; + + if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE)) + return qcom_scm_ice_invalidate_key(slot); + + /* Only AES-256-XTS has been tested so far. 
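+	 * Reject any other algorithm or key size rather than program a key
+	 * with untested parameters.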
*/ + cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; + if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || + cap.key_size != UFS_CRYPTO_KEY_SIZE_256) { + dev_err_ratelimited(hba->dev, + "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", + cap.algorithm_id, cap.key_size); + return -EINVAL; + } + + memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); + + /* + * The SCM call byte-swaps the 32-bit words of the key. So we have to + * do the same, in order for the final key be correct. + */ + for (i = 0; i < ARRAY_SIZE(key.words); i++) + __cpu_to_be32s(&key.words[i]); + + err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, + QCOM_SCM_ICE_CIPHER_AES_256_XTS, + cfg->data_unit_size); + memzero_explicit(&key, sizeof(key)); + return err; +} diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c new file mode 100644 index 000000000000..f10d4668814c --- /dev/null +++ b/drivers/ufs/host/ufs-qcom.c @@ -0,0 +1,1543 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. + */ + +#include <linux/acpi.h> +#include <linux/time.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/gpio/consumer.h> +#include <linux/reset-controller.h> +#include <linux/devfreq.h> + +#include <ufs/ufshcd.h> +#include "ufshcd-pltfrm.h" +#include <ufs/unipro.h> +#include "ufs-qcom.h" +#include <ufs/ufshci.h> +#include <ufs/ufs_quirks.h> + +#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ + (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + +enum { + TSTBUS_UAWM, + TSTBUS_UARM, + TSTBUS_TXUC, + TSTBUS_RXUC, + TSTBUS_DFC, + TSTBUS_TRLUT, + TSTBUS_TMRLUT, + TSTBUS_OCSC, + TSTBUS_UTP_HCI, + TSTBUS_COMBINED, + TSTBUS_WRAPPER, + TSTBUS_UNIPRO, + TSTBUS_MAX, +}; + +static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; + +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles); + +static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) +{ + return container_of(rcd, struct ufs_qcom_host, rcdev); +} + +static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, + const char *prefix, void *priv) +{ + ufshcd_dump_regs(hba, offset, len * 4, prefix); +} + +static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) +{ + int err = 0; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes); + if (err) + dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", + __func__, err); + + return err; +} + +static int ufs_qcom_host_clk_get(struct device *dev, + const char *name, struct clk **clk_out, bool optional) +{ + struct clk *clk; + int err = 0; + + clk = devm_clk_get(dev, name); + if (!IS_ERR(clk)) { + *clk_out = clk; + return 0; + } + + err = PTR_ERR(clk); + + if (optional && err == -ENOENT) { + *clk_out = NULL; + return 0; + } + + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to get %s err %d\n", name, err); + + return err; +} + +static int ufs_qcom_host_clk_enable(struct device *dev, + const char *name, struct clk *clk) +{ + int err = 0; + + err = clk_prepare_enable(clk); + if (err) + dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err); + + return err; +} + +static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) +{ + if (!host->is_lane_clks_enabled) + return; + + 
clk_disable_unprepare(host->tx_l1_sync_clk); + clk_disable_unprepare(host->tx_l0_sync_clk); + clk_disable_unprepare(host->rx_l1_sync_clk); + clk_disable_unprepare(host->rx_l0_sync_clk); + + host->is_lane_clks_enabled = false; +} + +static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) +{ + int err = 0; + struct device *dev = host->hba->dev; + + if (host->is_lane_clks_enabled) + return 0; + + err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk", + host->rx_l0_sync_clk); + if (err) + goto out; + + err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk", + host->tx_l0_sync_clk); + if (err) + goto disable_rx_l0; + + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", + host->rx_l1_sync_clk); + if (err) + goto disable_tx_l0; + + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", + host->tx_l1_sync_clk); + if (err) + goto disable_rx_l1; + + host->is_lane_clks_enabled = true; + goto out; + +disable_rx_l1: + clk_disable_unprepare(host->rx_l1_sync_clk); +disable_tx_l0: + clk_disable_unprepare(host->tx_l0_sync_clk); +disable_rx_l0: + clk_disable_unprepare(host->rx_l0_sync_clk); +out: + return err; +} + +static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) +{ + int err = 0; + struct device *dev = host->hba->dev; + + if (has_acpi_companion(dev)) + return 0; + + err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk", + &host->rx_l0_sync_clk, false); + if (err) + goto out; + + err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk", + &host->tx_l0_sync_clk, false); + if (err) + goto out; + + /* In case of single lane per direction, don't read lane1 clocks */ + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", + &host->rx_l1_sync_clk, false); + if (err) + goto out; + + err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", + &host->tx_l1_sync_clk, true); + } +out: + return err; +} + +static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) +{ + u32 tx_lanes; + + return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); +} + +static int ufs_qcom_check_hibern8(struct ufs_hba *hba) +{ + int err; + u32 tx_fsm_val = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); + + do { + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + if (err || tx_fsm_val == TX_FSM_HIBERN8) + break; + + /* sleep for max. 200us */ + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + /* + * we might have scheduled out for long during polling so + * check the state again. + */ + if (time_after(jiffies, timeout)) + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + + if (err) { + dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", + __func__, err); + } else if (tx_fsm_val != TX_FSM_HIBERN8) { + err = tx_fsm_val; + dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", + __func__, err); + } + + return err; +} + +static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) +{ + ufshcd_rmwl(host->hba, QUNIPRO_SEL, + ufs_qcom_cap_qunipro(host) ? 
QUNIPRO_SEL : 0, + REG_UFS_CFG1); + /* make sure above configuration is applied before we return */ + mb(); +} + +/* + * ufs_qcom_host_reset - reset host controller and PHY + */ +static int ufs_qcom_host_reset(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + bool reenable_intr = false; + + if (!host->core_reset) { + dev_warn(hba->dev, "%s: reset control not set\n", __func__); + goto out; + } + + reenable_intr = hba->is_irq_enabled; + disable_irq(hba->irq); + hba->is_irq_enabled = false; + + ret = reset_control_assert(host->core_reset); + if (ret) { + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. + */ + usleep_range(200, 210); + + ret = reset_control_deassert(host->core_reset); + if (ret) + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + + usleep_range(1000, 1100); + + if (reenable_intr) { + enable_irq(hba->irq); + hba->is_irq_enabled = true; + } + +out: + return ret; +} + +static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int ret = 0; + bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B; + + /* Reset UFS Host Controller and PHY */ + ret = ufs_qcom_host_reset(hba); + if (ret) + dev_warn(hba->dev, "%s: host reset returned %d\n", + __func__, ret); + + if (is_rate_B) + phy_set_mode(phy, PHY_MODE_UFS_HS_B); + + /* phy initialization - calibrate the phy */ + ret = phy_init(phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + goto out; + } + + /* power on phy - start serdes and phy's power and clocks */ + ret = phy_power_on(phy); + if (ret) { + dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", + __func__, ret); + goto out_disable_phy; + } + + ufs_qcom_select_unipro_mode(host); + + return 0; + +out_disable_phy: + phy_exit(phy); +out: + return ret; +} + +/* + * The UTP controller has a number of internal clock gating cells (CGCs). + * Internal hardware sub-modules within the UTP controller control the CGCs. + * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved + * in a specific operation, UTP controller CGCs are by default disabled and + * this function enables them (after every UFS link startup) to save some power + * leakage. + */ +static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) +{ + ufshcd_writel(hba, + ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, + REG_UFS_CFG2); + + /* Ensure that HW clock gating is enabled before next operations */ + mb(); +} + +static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err = 0; + + switch (status) { + case PRE_CHANGE: + ufs_qcom_power_up_sequence(hba); + /* + * The PHY PLL output is the source of tx/rx lane symbol + * clocks, hence, enable the lane clocks only after PHY + * is initialized. 
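+		 * (ufs_qcom_power_up_sequence() above performs that
+		 * initialization.)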
+ */ + err = ufs_qcom_enable_lane_clks(host); + break; + case POST_CHANGE: + /* check if UFS PHY moved from DISABLED to HIBERN8 */ + err = ufs_qcom_check_hibern8(hba); + ufs_qcom_enable_hw_clk_gating(hba); + ufs_qcom_ice_enable(host); + break; + default: + dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); + err = -EINVAL; + break; + } + return err; +} + +/* + * Returns zero for success and non-zero in case of a failure + */ +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, + u32 hs, u32 rate, bool update_link_startup_timer) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_clk_info *clki; + u32 core_clk_period_in_ns; + u32 tx_clk_cycles_per_us = 0; + unsigned long core_clk_rate = 0; + u32 core_clk_cycles_per_us = 0; + + static u32 pwm_fr_table[][2] = { + {UFS_PWM_G1, 0x1}, + {UFS_PWM_G2, 0x1}, + {UFS_PWM_G3, 0x1}, + {UFS_PWM_G4, 0x1}, + }; + + static u32 hs_fr_table_rA[][2] = { + {UFS_HS_G1, 0x1F}, + {UFS_HS_G2, 0x3e}, + {UFS_HS_G3, 0x7D}, + }; + + static u32 hs_fr_table_rB[][2] = { + {UFS_HS_G1, 0x24}, + {UFS_HS_G2, 0x49}, + {UFS_HS_G3, 0x92}, + }; + + /* + * The Qunipro controller does not use following registers: + * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG & + * UFS_REG_PA_LINK_STARTUP_TIMER + * But UTP controller uses SYS1CLK_1US_REG register for Interrupt + * Aggregation logic. + */ + if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) + goto out; + + if (gear == 0) { + dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); + goto out_error; + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core_clk")) + core_clk_rate = clk_get_rate(clki->clk); + } + + /* If frequency is smaller than 1MHz, set to 1MHz */ + if (core_clk_rate < DEFAULT_CLK_RATE_HZ) + core_clk_rate = DEFAULT_CLK_RATE_HZ; + + core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; + if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { + ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); + /* + * make sure above write gets applied before we return from + * this function. 
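+		 * (The mb() below enforces that ordering.)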
+ */ + mb(); + } + + if (ufs_qcom_cap_qunipro(host)) + goto out; + + core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; + core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; + core_clk_period_in_ns &= MASK_CLK_NS_REG; + + switch (hs) { + case FASTAUTO_MODE: + case FAST_MODE: + if (rate == PA_HS_MODE_A) { + if (gear > ARRAY_SIZE(hs_fr_table_rA)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(hs_fr_table_rA)); + goto out_error; + } + tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; + } else if (rate == PA_HS_MODE_B) { + if (gear > ARRAY_SIZE(hs_fr_table_rB)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(hs_fr_table_rB)); + goto out_error; + } + tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; + } else { + dev_err(hba->dev, "%s: invalid rate = %d\n", + __func__, rate); + goto out_error; + } + break; + case SLOWAUTO_MODE: + case SLOW_MODE: + if (gear > ARRAY_SIZE(pwm_fr_table)) { + dev_err(hba->dev, + "%s: index %d exceeds table size %zu\n", + __func__, gear, + ARRAY_SIZE(pwm_fr_table)); + goto out_error; + } + tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; + break; + case UNCHANGED: + default: + dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); + goto out_error; + } + + if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != + (core_clk_period_in_ns | tx_clk_cycles_per_us)) { + /* this register 2 fields shall be written at once */ + ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, + REG_UFS_TX_SYMBOL_CLK_NS_US); + /* + * make sure above write gets applied before we return from + * this function. + */ + mb(); + } + + if (update_link_startup_timer) { + ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), + REG_UFS_PA_LINK_STARTUP_TIMER); + /* + * make sure that this configuration is applied before + * we return + */ + mb(); + } + goto out; + +out_error: + ret = -EINVAL; +out: + return ret; +} + +static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + switch (status) { + case PRE_CHANGE: + if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, + 0, true)) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", + __func__); + err = -EINVAL; + goto out; + } + + if (ufs_qcom_cap_qunipro(host)) + /* + * set unipro core clock cycles to 150 & clear clock + * divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, + 150); + + /* + * Some UFS devices (and may be host) have issues if LCC is + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. 
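+		 * (LCC = Line Control Command, an M-PHY line-signalling
+		 * mechanism.)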
+ */ + if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) + err = ufshcd_disable_host_tx_lcc(hba); + + break; + case POST_CHANGE: + ufs_qcom_link_startup_post_change(hba); + break; + default: + break; + } + +out: + return err; +} + +static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* reset gpio is optional */ + if (!host->device_reset) + return; + + gpiod_set_value_cansleep(host->device_reset, asserted); +} + +static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, + enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + + if (status == PRE_CHANGE) + return 0; + + if (ufs_qcom_is_link_off(hba)) { + /* + * Disable the tx/rx lane symbol clocks before PHY is + * powered down as the PLL source should be disabled + * after downstream clocks are disabled. + */ + ufs_qcom_disable_lane_clks(host); + phy_power_off(phy); + + /* reset the connected UFS device during power down */ + ufs_qcom_device_reset_ctrl(hba, true); + + } else if (!ufs_qcom_is_link_active(hba)) { + ufs_qcom_disable_lane_clks(host); + } + + return 0; +} + +static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy = host->generic_phy; + int err; + + if (ufs_qcom_is_link_off(hba)) { + err = phy_power_on(phy); + if (err) { + dev_err(hba->dev, "%s: failed PHY power on: %d\n", + __func__, err); + return err; + } + + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; + + } else if (!ufs_qcom_is_link_active(hba)) { + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; + } + + return ufs_qcom_ice_resume(host); +} + +static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) +{ + if (host->dev_ref_clk_ctrl_mmio && + (enable ^ host->is_dev_ref_clk_enabled)) { + u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); + + if (enable) + temp |= host->dev_ref_clk_en_mask; + else + temp &= ~host->dev_ref_clk_en_mask; + + /* + * If we are here to disable this clock it might be immediately + * after entering into hibern8 in which case we need to make + * sure that device ref_clk is active for specific time after + * hibern8 enter. + */ + if (!enable) { + unsigned long gating_wait; + + gating_wait = host->hba->dev_info.clk_gating_wait_us; + if (!gating_wait) { + udelay(1); + } else { + /* + * bRefClkGatingWaitTime defines the minimum + * time for which the reference clock is + * required by device during transition from + * HS-MODE to LS-MODE or HIBERN8 state. Give it + * more delay to be on the safe side. + */ + gating_wait += 10; + usleep_range(gating_wait, gating_wait + 10); + } + } + + writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); + + /* + * Make sure the write to ref_clk reaches the destination and + * not stored in a Write Buffer (WB). + */ + readl(host->dev_ref_clk_ctrl_mmio); + + /* + * If we call hibern8 exit after this, we need to make sure that + * device ref_clk is stable for at least 1us before the hibern8 + * exit command. 
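+		 * (The udelay(1) below provides exactly that.)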
+ */ + if (enable) + udelay(1); + + host->is_dev_ref_clk_enabled = enable; + } +} + +static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_dev_params ufs_qcom_cap; + int ret = 0; + + if (!dev_req_params) { + pr_err("%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + switch (status) { + case PRE_CHANGE: + ufshcd_init_pwr_dev_param(&ufs_qcom_cap); + ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE; + + if (host->hw_ver.major == 0x1) { + /* + * HS-G3 operations may not reliably work on legacy QCOM + * UFS host controller hardware even though capability + * exchange during link startup phase may end up + * negotiating maximum supported gear as G3. + * Hence downgrade the maximum supported gear to HS-G2. + */ + if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_tx_gear = UFS_HS_G2; + if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_rx_gear = UFS_HS_G2; + } + + ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap, + dev_max_params, + dev_req_params); + if (ret) { + pr_err("%s: failed to determine capabilities\n", + __func__); + goto out; + } + + /* enable the device ref clock before changing to HS mode */ + if (!ufshcd_is_hs_mode(&hba->pwr_info) && + ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, true); + + if (host->hw_ver.major >= 0x4) { + ufshcd_dme_configure_adapt(hba, + dev_req_params->gear_tx, + PA_INITIAL_ADAPT); + } + break; + case POST_CHANGE: + if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, + dev_req_params->pwr_rx, + dev_req_params->hs_rate, false)) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", + __func__); + /* + * we return error code at the end of the routine, + * but continue to configure UFS_PHY_TX_LANE_ENABLE + * and bus voting as usual + */ + ret = -EINVAL; + } + + /* cache the power mode parameters to use internally */ + memcpy(&host->dev_req_params, + dev_req_params, sizeof(*dev_req_params)); + + /* disable the device ref clock if entered PWM mode */ + if (ufshcd_is_hs_mode(&hba->pwr_info) && + !ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, false); + break; + default: + ret = -EINVAL; + break; + } +out: + return ret; +} + +static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) +{ + int err; + u32 pa_vs_config_reg1; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + &pa_vs_config_reg1); + if (err) + goto out; + + /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), + (pa_vs_config_reg1 | (1 << 12))); + +out: + return err; +} + +static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) +{ + int err = 0; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) + err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) + hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; + + return err; +} + +static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (host->hw_ver.major == 0x1) + return ufshci_version(1, 1); + else + return ufshci_version(2, 0); +} + +/** + * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks + * @hba: host controller instance + * + * QCOM UFS host controller might have some non standard behaviours 
(quirks)
+ * beyond what is specified by the UFSHCI specification. Advertise all such
+ * quirks to the standard UFS host controller driver so that it takes them
+ * into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (host->hw_ver.major == 0x01) {
+		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+
+		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
+	}
+
+	if (host->hw_ver.major == 0x2) {
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+
+		if (!ufs_qcom_cap_qunipro(host))
+			/* Legacy UniPro mode still needs the following quirks */
+			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
+				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
+	}
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+	hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+	hba->caps |= UFSHCD_CAP_WB_EN;
+	hba->caps |= UFSHCD_CAP_CRYPTO;
+	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
+	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+	if (host->hw_ver.major >= 0x2) {
+		host->caps = UFS_QCOM_CAP_QUNIPRO |
+			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
+	}
+}
+
+/**
+ * ufs_qcom_setup_clocks - enables/disables clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 enum ufs_notify_change_status status)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/*
+	 * ufs_qcom_setup_clocks() may be invoked before ufs_qcom_init() is
+	 * done; in that case simply ignore the call, as it will be made
+	 * again from ufs_qcom_init() once initialization completes.
+	 */
+	if (!host)
+		return 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (!on) {
+			if (!ufs_qcom_is_link_active(hba)) {
+				/* disable device ref_clk */
+				ufs_qcom_dev_ref_clk_ctrl(host, false);
+			}
+		}
+		break;
+	case POST_CHANGE:
+		if (on) {
+			/* enable the device ref clock for HS mode */
+			if (ufshcd_is_hs_mode(&hba->pwr_info))
+				ufs_qcom_dev_ref_clk_ctrl(host, true);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int
+ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_assert_reset(host->hba);
+	/* provide 1ms delay to let the reset pulse propagate. */
+	usleep_range(1000, 1100);
+	return 0;
+}
+
+static int
+ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_deassert_reset(host->hba);
+
+	/*
+	 * After reset deassertion, the PHY needs all ref clocks, voltage and
+	 * current to settle down before starting the serdes.
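+	 * (Hence the ~1 ms sleep below, matching the delay used on assert.)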
+ */ + usleep_range(1000, 1100); + return 0; +} + +static const struct reset_control_ops ufs_qcom_reset_ops = { + .assert = ufs_qcom_reset_assert, + .deassert = ufs_qcom_reset_deassert, +}; + +/** + * ufs_qcom_init - bind phy with controller + * @hba: host controller instance + * + * Binds PHY with controller and powers up PHY enabling clocks + * and regulators. + * + * Returns -EPROBE_DEFER if binding fails, returns negative error + * on phy power up failure and returns zero on success. + */ +static int ufs_qcom_init(struct ufs_hba *hba) +{ + int err; + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct ufs_qcom_host *host; + struct resource *res; + struct ufs_clk_info *clki; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + err = -ENOMEM; + dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); + goto out; + } + + /* Make a two way bind between the qcom host and the hba */ + host->hba = hba; + ufshcd_set_variant(hba, host); + + /* Setup the optional reset control of HCI */ + host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); + if (IS_ERR(host->core_reset)) { + err = dev_err_probe(dev, PTR_ERR(host->core_reset), + "Failed to get reset control\n"); + goto out_variant_clear; + } + + /* Fire up the reset controller. Failure here is non-fatal. */ + host->rcdev.of_node = dev->of_node; + host->rcdev.ops = &ufs_qcom_reset_ops; + host->rcdev.owner = dev->driver->owner; + host->rcdev.nr_resets = 1; + err = devm_reset_controller_register(dev, &host->rcdev); + if (err) { + dev_warn(dev, "Failed to register reset controller\n"); + err = 0; + } + + if (!has_acpi_companion(dev)) { + host->generic_phy = devm_phy_get(dev, "ufsphy"); + if (IS_ERR(host->generic_phy)) { + err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n"); + goto out_variant_clear; + } + } + + host->device_reset = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(host->device_reset)) { + err = PTR_ERR(host->device_reset); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to acquire reset gpio: %d\n", err); + goto out_variant_clear; + } + + ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, + &host->hw_ver.minor, &host->hw_ver.step); + + /* + * for newer controllers, device reference clock control bit has + * moved inside UFS controller register address space itself. 
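+	 * (On those controllers it is bit 26 of REG_UFS_CFG1; older parts
+	 * expose it through the optional "dev_ref_clk_ctrl_mem" resource,
+	 * bit 5.)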
+ */ + if (host->hw_ver.major >= 0x02) { + host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; + host->dev_ref_clk_en_mask = BIT(26); + } else { + /* "dev_ref_clk_ctrl_mem" is optional resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dev_ref_clk_ctrl_mem"); + if (res) { + host->dev_ref_clk_ctrl_mmio = + devm_ioremap_resource(dev, res); + if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) + host->dev_ref_clk_ctrl_mmio = NULL; + host->dev_ref_clk_en_mask = BIT(5); + } + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, "core_clk_unipro")) + clki->keep_link_active = true; + } + + err = ufs_qcom_init_lane_clks(host); + if (err) + goto out_variant_clear; + + ufs_qcom_set_caps(hba); + ufs_qcom_advertise_quirks(hba); + + err = ufs_qcom_ice_init(host); + if (err) + goto out_variant_clear; + + ufs_qcom_setup_clocks(hba, true, POST_CHANGE); + + if (hba->dev->id < MAX_UFS_QCOM_HOSTS) + ufs_qcom_hosts[hba->dev->id] = host; + + host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN; + ufs_qcom_get_default_testbus_cfg(host); + err = ufs_qcom_testbus_config(host); + if (err) { + dev_warn(dev, "%s: failed to configure the testbus %d\n", + __func__, err); + err = 0; + } + + goto out; + +out_variant_clear: + ufshcd_set_variant(hba, NULL); +out: + return err; +} + +static void ufs_qcom_exit(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + ufs_qcom_disable_lane_clks(host); + phy_power_off(host->generic_phy); + phy_exit(host->generic_phy); +} + +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles) +{ + int err; + u32 core_clk_ctrl_reg; + + if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK) + return -EINVAL; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + if (err) + goto out; + + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK; + core_clk_ctrl_reg |= clk_cycles; + + /* Clear CORE_CLK_DIV_EN */ + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); +out: + return err; +} + +static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) +{ + /* nothing to do as of now */ + return 0; +} + +static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 150 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); +} + +static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; + u32 core_clk_ctrl_reg; + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + + /* make sure CORE_CLK_DIV_EN is cleared */ + if (!err && + (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); + } + + return err; +} + +static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 75 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); +} 
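The values 150 and 75 passed to ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() above fill the MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL, i.e. UniPro core clock cycles per microsecond, which is simply the core clock rate expressed in MHz. A minimal sketch of that conversion (the helper name is hypothetical and not part of this patch; it assumes only the cycles-per-microsecond encoding implied by the attribute name):

static u32 unipro_core_clk_1us_cycles(unsigned long rate_hz)
{
	/* cycles elapsed in 1 us == rate in MHz: 150 MHz -> 150, 75 MHz -> 75 */
	return rate_hz / USEC_PER_SEC;
}

So the two constants correspond to assumed scaled-up/scaled-down core clock rates of 150 MHz and 75 MHz.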
+ +static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, + bool scale_up, enum ufs_notify_change_status status) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; + int err = 0; + + if (status == PRE_CHANGE) { + err = ufshcd_uic_hibern8_enter(hba); + if (err) + return err; + if (scale_up) + err = ufs_qcom_clk_scale_up_pre_change(hba); + else + err = ufs_qcom_clk_scale_down_pre_change(hba); + if (err) + ufshcd_uic_hibern8_exit(hba); + + } else { + if (scale_up) + err = ufs_qcom_clk_scale_up_post_change(hba); + else + err = ufs_qcom_clk_scale_down_post_change(hba); + + + if (err || !dev_req_params) { + ufshcd_uic_hibern8_exit(hba); + goto out; + } + + ufs_qcom_cfg_timers(hba, + dev_req_params->gear_rx, + dev_req_params->pwr_rx, + dev_req_params->hs_rate, + false); + ufshcd_uic_hibern8_exit(hba); + } + +out: + return err; +} + +static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, + void *priv, void (*print_fn)(struct ufs_hba *hba, + int offset, int num_regs, const char *str, void *priv)) +{ + u32 reg; + struct ufs_qcom_host *host; + + if (unlikely(!hba)) { + pr_err("%s: hba is NULL\n", __func__); + return; + } + if (unlikely(!print_fn)) { + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); + return; + } + + host = ufshcd_get_variant(hba); + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) + return; + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); + + reg = ufshcd_readl(hba, REG_UFS_CFG1); + reg |= UTP_DBG_RAMS_EN; + ufshcd_writel(hba, reg, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); + + /* clear bit 17 - UTP_DBG_RAMS_EN */ + ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); +} + +static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) +{ + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, + UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); + } else { + ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); + } +} + +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) +{ + /* provide a legal default 
configuration */ + host->testbus.select_major = TSTBUS_UNIPRO; + host->testbus.select_minor = 37; +} + +static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) +{ + if (host->testbus.select_major >= TSTBUS_MAX) { + dev_err(host->hba->dev, + "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", + __func__, host->testbus.select_major); + return false; + } + + return true; +} + +int ufs_qcom_testbus_config(struct ufs_qcom_host *host) +{ + int reg; + int offset; + u32 mask = TEST_BUS_SUB_SEL_MASK; + + if (!host) + return -EINVAL; + + if (!ufs_qcom_testbus_cfg_is_ok(host)) + return -EPERM; + + switch (host->testbus.select_major) { + case TSTBUS_UAWM: + reg = UFS_TEST_BUS_CTRL_0; + offset = 24; + break; + case TSTBUS_UARM: + reg = UFS_TEST_BUS_CTRL_0; + offset = 16; + break; + case TSTBUS_TXUC: + reg = UFS_TEST_BUS_CTRL_0; + offset = 8; + break; + case TSTBUS_RXUC: + reg = UFS_TEST_BUS_CTRL_0; + offset = 0; + break; + case TSTBUS_DFC: + reg = UFS_TEST_BUS_CTRL_1; + offset = 24; + break; + case TSTBUS_TRLUT: + reg = UFS_TEST_BUS_CTRL_1; + offset = 16; + break; + case TSTBUS_TMRLUT: + reg = UFS_TEST_BUS_CTRL_1; + offset = 8; + break; + case TSTBUS_OCSC: + reg = UFS_TEST_BUS_CTRL_1; + offset = 0; + break; + case TSTBUS_WRAPPER: + reg = UFS_TEST_BUS_CTRL_2; + offset = 16; + break; + case TSTBUS_COMBINED: + reg = UFS_TEST_BUS_CTRL_2; + offset = 8; + break; + case TSTBUS_UTP_HCI: + reg = UFS_TEST_BUS_CTRL_2; + offset = 0; + break; + case TSTBUS_UNIPRO: + reg = UFS_UNIPRO_CFG; + offset = 20; + mask = 0xFFF; + break; + /* + * No need for a default case, since + * ufs_qcom_testbus_cfg_is_ok() checks that the configuration + * is legal + */ + } + mask <<= offset; + ufshcd_rmwl(host->hba, TEST_BUS_SEL, + (u32)host->testbus.select_major << 19, + REG_UFS_CFG1); + ufshcd_rmwl(host->hba, mask, + (u32)host->testbus.select_minor << offset, + reg); + ufs_qcom_enable_test_bus(host); + /* + * Make sure the test bus configuration is + * committed before returning. + */ + mb(); + + return 0; +} + +static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, + "HCI Vendor Specific Registers "); + + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); +} + +/** + * ufs_qcom_device_reset() - toggle the (optional) device reset line + * @hba: per-adapter instance + * + * Toggles the (optional) reset line to reset the attached device. + */ +static int ufs_qcom_device_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* reset gpio is optional */ + if (!host->device_reset) + return -EOPNOTSUPP; + + /* + * The UFS device shall detect reset pulses of 1us, sleep for 10us to + * be on the safe side. + */ + ufs_qcom_device_reset_ctrl(hba, true); + usleep_range(10, 15); + + ufs_qcom_device_reset_ctrl(hba, false); + usleep_range(10, 15); + + return 0; +} + +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) +static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, + struct devfreq_dev_profile *p, + struct devfreq_simple_ondemand_data *d) +{ + p->polling_ms = 60; + d->upthreshold = 70; + d->downdifferential = 5; +} +#else +static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, + struct devfreq_dev_profile *p, + struct devfreq_simple_ondemand_data *data) +{ +} +#endif + +/* + * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations + * + * The variant operations configure the necessary controller and PHY + * handshake during initialization. 
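+ * The ufshcd core invokes them at fixed points in the controller's life
+ * cycle: init/exit, clock setup, HCE enable, link startup, power mode
+ * changes, device reset and suspend/resume.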
+ */ +static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { + .name = "qcom", + .init = ufs_qcom_init, + .exit = ufs_qcom_exit, + .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, + .clk_scale_notify = ufs_qcom_clk_scale_notify, + .setup_clocks = ufs_qcom_setup_clocks, + .hce_enable_notify = ufs_qcom_hce_enable_notify, + .link_startup_notify = ufs_qcom_link_startup_notify, + .pwr_change_notify = ufs_qcom_pwr_change_notify, + .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .suspend = ufs_qcom_suspend, + .resume = ufs_qcom_resume, + .dbg_register_dump = ufs_qcom_dump_dbg_regs, + .device_reset = ufs_qcom_device_reset, + .config_scaling_param = ufs_qcom_config_scaling_param, + .program_key = ufs_qcom_ice_program_key, +}; + +/** + * ufs_qcom_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Return zero for success and non-zero for failure + */ +static int ufs_qcom_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * ufs_qcom_remove - set driver_data of the device to NULL + * @pdev: pointer to platform device handle + * + * Always returns 0 + */ +static int ufs_qcom_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static const struct of_device_id ufs_qcom_of_match[] = { + { .compatible = "qcom,ufshc"}, + {}, +}; +MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ufs_qcom_acpi_match[] = { + { "QCOM24A5" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match); +#endif + +static const struct dev_pm_ops ufs_qcom_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) + .prepare = ufshcd_suspend_prepare, + .complete = ufshcd_resume_complete, +}; + +static struct platform_driver ufs_qcom_pltform = { + .probe = ufs_qcom_probe, + .remove = ufs_qcom_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-qcom", + .pm = &ufs_qcom_pm_ops, + .of_match_table = of_match_ptr(ufs_qcom_of_match), + .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match), + }, +}; +module_platform_driver(ufs_qcom_pltform); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h new file mode 100644 index 000000000000..44466a395bb5 --- /dev/null +++ b/drivers/ufs/host/ufs-qcom.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef UFS_QCOM_H_ +#define UFS_QCOM_H_ + +#include <linux/reset-controller.h> +#include <linux/reset.h> +#include <ufs/ufshcd.h> + +#define MAX_UFS_QCOM_HOSTS 1 +#define MAX_U32 (~(u32)0) +#define MPHY_TX_FSM_STATE 0x41 +#define TX_FSM_HIBERN8 0x1 +#define HBRN8_POLL_TOUT_MS 100 +#define DEFAULT_CLK_RATE_HZ 1000000 +#define BUS_VECTOR_NAME_LEN 32 + +#define UFS_HW_VER_MAJOR_SHFT (28) +#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT) +#define UFS_HW_VER_MINOR_SHFT (16) +#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT) +#define UFS_HW_VER_STEP_SHFT (0) +#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT) + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B + +/* QCOM UFS host controller vendor specific registers */ +enum { + REG_UFS_SYS1CLK_1US = 0xC0, + REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4, + REG_UFS_LOCAL_PORT_ID_REG = 0xC8, + REG_UFS_PA_ERR_CODE = 0xCC, + REG_UFS_RETRY_TIMER_REG = 0xD0, + REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8, + REG_UFS_CFG1 = 0xDC, + REG_UFS_CFG2 = 0xE0, + REG_UFS_HW_VERSION = 0xE4, + + UFS_TEST_BUS = 0xE8, + UFS_TEST_BUS_CTRL_0 = 0xEC, + UFS_TEST_BUS_CTRL_1 = 0xF0, + UFS_TEST_BUS_CTRL_2 = 0xF4, + UFS_UNIPRO_CFG = 0xF8, + + /* + * QCOM UFS host controller vendor specific registers + * added in HW Version 3.0.0 + */ + UFS_AH8_CFG = 0xFC, +}; + +/* QCOM UFS host controller vendor specific debug registers */ +enum { + UFS_DBG_RD_REG_UAWM = 0x100, + UFS_DBG_RD_REG_UARM = 0x200, + UFS_DBG_RD_REG_TXUC = 0x300, + UFS_DBG_RD_REG_RXUC = 0x400, + UFS_DBG_RD_REG_DFC = 0x500, + UFS_DBG_RD_REG_TRLUT = 0x600, + UFS_DBG_RD_REG_TMRLUT = 0x700, + UFS_UFS_DBG_RD_REG_OCSC = 0x800, + + UFS_UFS_DBG_RD_DESC_RAM = 0x1500, + UFS_UFS_DBG_RD_PRDT_RAM = 0x1700, + UFS_UFS_DBG_RD_RESP_RAM = 0x1800, + UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, +}; + +#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x) +#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x) + +/* bit definitions for REG_UFS_CFG1 register */ +#define QUNIPRO_SEL 0x1 +#define UTP_DBG_RAMS_EN 0x20000 +#define TEST_BUS_EN BIT(18) +#define TEST_BUS_SEL GENMASK(22, 19) +#define UFS_REG_TEST_BUS_EN BIT(30) + +/* bit definitions for REG_UFS_CFG2 register */ +#define UAWM_HW_CGC_EN (1 << 0) +#define UARM_HW_CGC_EN (1 << 1) +#define TXUC_HW_CGC_EN (1 << 2) +#define RXUC_HW_CGC_EN (1 << 3) +#define DFC_HW_CGC_EN (1 << 4) +#define TRLUT_HW_CGC_EN (1 << 5) +#define TMRLUT_HW_CGC_EN (1 << 6) +#define OCSC_HW_CGC_EN (1 << 7) + +/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */ +#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */ + +#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\ + TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\ + DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\ + TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN) + +/* bit offset */ +enum { + OFFSET_UFS_PHY_SOFT_RESET = 1, + OFFSET_CLK_NS_REG = 10, +}; + +/* bit masks */ +enum { + MASK_UFS_PHY_SOFT_RESET = 0x2, + MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF, + MASK_CLK_NS_REG = 0xFFFC00, +}; + +/* QCOM UFS debug print bit mask */ +#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0) +#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1) +#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2) + +#define UFS_QCOM_DBG_PRINT_ALL \ + (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \ + UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + +/* QUniPro Vendor specific attributes */ +#define PA_VS_CONFIG_REG1 0x9000 +#define DME_VS_CORE_CLK_CTRL 0xD002 +/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */ 
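+/* bits [7:0] hold the core clock cycles per 1 us; bit 8 enables the divider */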
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8) +#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF + +static inline void +ufs_qcom_get_controller_revision(struct ufs_hba *hba, + u8 *major, u16 *minor, u16 *step) +{ + u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); + + *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT; + *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT; + *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT; +}; + +static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, + 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); + + /* + * Make sure assertion of ufs phy reset is written to + * register before returning + */ + mb(); +} + +static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) +{ + ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, + 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); + + /* + * Make sure de-assertion of ufs phy reset is written to + * register before returning + */ + mb(); +} + +/* Host controller hardware version: major.minor.step */ +struct ufs_hw_version { + u16 step; + u16 minor; + u8 major; +}; + +struct ufs_qcom_testbus { + u8 select_major; + u8 select_minor; +}; + +struct gpio_desc; + +struct ufs_qcom_host { + /* + * Set this capability if host controller supports the QUniPro mode + * and if driver wants the Host controller to operate in QUniPro mode. + * Note: By default this capability will be kept enabled if host + * controller supports the QUniPro mode. + */ + #define UFS_QCOM_CAP_QUNIPRO 0x1 + + /* + * Set this capability if host controller can retain the secure + * configuration even after UFS controller core power collapse. + */ + #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2 + u32 caps; + + struct phy *generic_phy; + struct ufs_hba *hba; + struct ufs_pa_layer_attr dev_req_params; + struct clk *rx_l0_sync_clk; + struct clk *tx_l0_sync_clk; + struct clk *rx_l1_sync_clk; + struct clk *tx_l1_sync_clk; + bool is_lane_clks_enabled; + + void __iomem *dev_ref_clk_ctrl_mmio; + bool is_dev_ref_clk_enabled; + struct ufs_hw_version hw_ver; +#ifdef CONFIG_SCSI_UFS_CRYPTO + void __iomem *ice_mmio; +#endif + + u32 dev_ref_clk_en_mask; + + /* Bitmask for enabling debug prints */ + u32 dbg_print_en; + struct ufs_qcom_testbus testbus; + + /* Reset control of HCI */ + struct reset_control *core_reset; + struct reset_controller_dev rcdev; + + struct gpio_desc *device_reset; +}; + +static inline u32 +ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg) +{ + if (host->hw_ver.major <= 0x02) + return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg); + + return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg); +}; + +#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) +#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) +#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) + +int ufs_qcom_testbus_config(struct ufs_qcom_host *host); + +static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) +{ + return host->caps & UFS_QCOM_CAP_QUNIPRO; +} + +/* ufs-qcom-ice.c */ + +#ifdef CONFIG_SCSI_UFS_CRYPTO +int ufs_qcom_ice_init(struct ufs_qcom_host *host); +int ufs_qcom_ice_enable(struct ufs_qcom_host *host); +int ufs_qcom_ice_resume(struct ufs_qcom_host *host); +int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot); +#else +static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + return 0; +} +static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + 
return 0; +} +static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + return 0; +} +#define ufs_qcom_ice_program_key NULL +#endif /* !CONFIG_SCSI_UFS_CRYPTO */ + +#endif /* UFS_QCOM_H_ */ diff --git a/drivers/ufs/host/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c new file mode 100644 index 000000000000..e28a67e1e314 --- /dev/null +++ b/drivers/ufs/host/ufshcd-dwc.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#include <linux/module.h> + +#include <ufs/ufshcd.h> +#include <ufs/unipro.h> + +#include "ufshcd-dwc.h" +#include "ufshci-dwc.h" + +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n) +{ + int ret = 0; + int attr_node = 0; + + for (attr_node = 0; attr_node < n; attr_node++) { + ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel, + ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer); + + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs); + +/** + * ufshcd_dwc_program_clk_div() + * This function programs the clk divider value. This value is needed to + * provide 1 microsecond tick to unipro layer. + * @hba: Private Structure pointer + * @divider_val: clock divider value to be programmed + * + */ +static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val) +{ + ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV); +} + +/** + * ufshcd_dwc_link_is_up() + * Check if link is up + * @hba: private structure pointer + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) +{ + int dme_result = 0; + + ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result); + + if (dme_result == UFSHCD_LINK_IS_UP) { + ufshcd_set_link_active(hba); + return 0; + } + + return 1; +} + +/** + * ufshcd_dwc_connection_setup() + * This function configures both the local side (host) and the peer side + * (device) unipro attributes to establish the connection to application/ + * cport. + * This function is not required if the hardware is properly configured to + * have this connection setup on reset. But invoking this function does no + * harm and should be fine even working with any ufs device. 
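+ * (It writes a default set of CPort connection attributes on both the
+ * local and the peer side; see setup_attrs below.)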
+ * + * @hba: pointer to drivers private data + * + * Returns 0 on success non-zero value on failure + */ +static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) +{ + static const struct ufshcd_dme_attr_val setup_attrs[] = { + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER }, + { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER }, + { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER }, + { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER }, + { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER }, + { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER } + }; + + return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs)); +} + +/** + * ufshcd_dwc_link_startup_notify() + * UFS Host DWC specific link startup sequence + * @hba: private structure pointer + * @status: Callback notify status + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + if (status == PRE_CHANGE) { + ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125); + + err = ufshcd_vops_phy_initialization(hba); + if (err) { + dev_err(hba->dev, "Phy setup failed (%d)\n", err); + goto out; + } + } else { /* POST_CHANGE */ + err = ufshcd_dwc_link_is_up(hba); + if (err) { + dev_err(hba->dev, "Link is not up\n"); + goto out; + } + + err = ufshcd_dwc_connection_setup(hba); + if (err) + dev_err(hba->dev, "Connection setup failed (%d)\n", + err); + } + +out: + return err; +} +EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify); + +MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>"); +MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h new file mode 100644 index 000000000000..ad91ea56662c --- /dev/null +++ b/drivers/ufs/host/ufshcd-dwc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host driver for Synopsys Designware Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto <jpinto@synopsys.com> + */ + +#ifndef _UFSHCD_DWC_H +#define _UFSHCD_DWC_H + +#include <ufs/ufshcd.h> + +struct ufshcd_dme_attr_val { + u32 attr_sel; + u32 mib_val; + u8 peer; +}; + +int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status); +int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba, + const struct ufshcd_dme_attr_val *v, int n); +#endif /* End of Header */ diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c new file mode 100644 index 000000000000..04166bda41da --- /dev/null +++ b/drivers/ufs/host/ufshcd-pci.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Universal Flash Storage Host controller PCI glue driver + * + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + */ + +#include <ufs/ufshcd.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/pm_qos.h> +#include <linux/debugfs.h> +#include <linux/uuid.h> +#include <linux/acpi.h> +#include <linux/gpio/consumer.h> + +struct ufs_host { + void (*late_init)(struct ufs_hba *hba); +}; + +enum { + INTEL_DSM_FNS = 0, + INTEL_DSM_RESET = 1, +}; + +struct intel_host { + struct ufs_host ufs_host; + u32 dsm_fns; + u32 active_ltr; + u32 idle_ltr; + struct dentry *debugfs_root; + struct gpio_desc *reset_gpio; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA, + 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50); + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + size_t len; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) { + err = -EINVAL; + goto out; + } + + len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + memcpy(result, obj->buffer.pointer, len); +out: + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev) +{ + int err; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err); +} + +static int ufs_intel_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + /* Cannot enable ICE until after HC enable */ + if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) { + u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE); + + hce |= CRYPTO_GENERAL_ENABLE; + ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE); + } + + return 0; +} + +static int ufs_intel_disable_lcc(struct ufs_hba *hba) +{ + u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE); + u32 lcc_enable = 0; + + ufshcd_dme_get(hba, attr, &lcc_enable); + if (lcc_enable) + ufshcd_disable_host_tx_lcc(hba); + + return 0; +} + +static int ufs_intel_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int err = 0; + + switch (status) { + case PRE_CHANGE: + err = ufs_intel_disable_lcc(hba); + break; + case POST_CHANGE: + break; + default: + break; + } + + return err; 
+
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+	int ret;
+
+	pwr_info.lane_rx = lanes;
+	pwr_info.lane_tx = lanes;
+	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+	if (ret)
+		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+			__func__, lanes, ret);
+	return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+				enum ufs_notify_change_status status,
+				struct ufs_pa_layer_attr *dev_max_params,
+				struct ufs_pa_layer_attr *dev_req_params)
+{
+	int err = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (ufshcd_is_hs_mode(dev_max_params) &&
+		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+			ufs_intel_set_lanes(hba, 2);
+		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+		break;
+	case POST_CHANGE:
+		if (ufshcd_is_hs_mode(dev_req_params)) {
+			u32 peer_granularity;
+
+			usleep_range(1000, 1250);
+			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+						  &peer_granularity);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	if (granularity == peer_granularity) {
+		u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+	}
+out:
+	return ret;
+}
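[The LKF quirk above only rewrites the peer's PA_TACTIVATE when both link partners report the same PA_GRANULARITY, since PA_TACTIVATE is expressed in granularity units and the +2 bump is only meaningful when the units match. A tiny stand-alone sketch of that decision, with an illustrative helper name:]

#include <stdio.h>

/* When units match, the peer gets the local value plus two units. */
static unsigned int quirked_peer_tactivate(unsigned int granularity,
					   unsigned int peer_granularity,
					   unsigned int pa_tactivate,
					   unsigned int peer_pa_tactivate)
{
	if (granularity == peer_granularity)
		return pa_tactivate + 2;
	return peer_pa_tactivate;	/* units differ: leave the peer alone */
}

int main(void)
{
	printf("%u\n", quirked_peer_tactivate(1, 1, 7, 5));	/* -> 9 */
	printf("%u\n", quirked_peer_tactivate(1, 2, 7, 5));	/* -> 5 */
	return 0;
}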
+
+#define INTEL_ACTIVELTR		0x804
+#define INTEL_IDLELTR		0x808
+
+#define INTEL_LTR_REQ		BIT(15)
+#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US	(2 << 10)
+#define INTEL_LTR_SCALE_32US	(3 << 10)
+#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
+
+static void intel_cache_ltr(struct ufs_hba *hba)
+{
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct intel_host *host = ufshcd_get_variant(hba);
+	u32 ltr;
+
+	pm_runtime_get_sync(dev);
+
+	/*
+	 * Program latency tolerance (LTR) according to what has been asked
+	 * by the PM QoS layer, or disable it if we were passed a negative
+	 * value or PM_QOS_LATENCY_ANY.
+	 */
+	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+
+	if (val == PM_QOS_LATENCY_ANY || val < 0) {
+		ltr &= ~INTEL_LTR_REQ;
+	} else {
+		ltr |= INTEL_LTR_REQ;
+		ltr &= ~INTEL_LTR_SCALE_MASK;
+		ltr &= ~INTEL_LTR_VALUE_MASK;
+
+		if (val > INTEL_LTR_VALUE_MASK) {
+			val >>= 5;
+			if (val > INTEL_LTR_VALUE_MASK)
+				val = INTEL_LTR_VALUE_MASK;
+			ltr |= INTEL_LTR_SCALE_32US | val;
+		} else {
+			ltr |= INTEL_LTR_SCALE_1US | val;
+		}
+	}
+
+	if (ltr == host->active_ltr)
+		goto out;
+
+	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
+	writel(ltr, hba->mmio_base + INTEL_IDLELTR);
+
+	/* Cache the values into intel_host structure */
+	intel_cache_ltr(hba);
+out:
+	pm_runtime_put(dev);
+}
+
+static void intel_ltr_expose(struct device *dev)
+{
+	dev->power.set_latency_tolerance = intel_ltr_set;
+	dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+	dev_pm_qos_hide_latency_tolerance(dev);
+	dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct ufs_hba *hba)
+{
+	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	intel_cache_ltr(hba);
+
+	host->debugfs_root = dir;
+	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct ufs_hba *hba)
+{
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	debugfs_remove_recursive(host->debugfs_root);
+}
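[intel_ltr_set() above packs a microsecond latency into a 10-bit value plus a scale field, falling back from 1 us to 32 us units when the value overflows. A self-contained sketch of the same encoding; the register layout constants are copied from above, while the helper name is made up:]

#include <stdio.h>
#include <stdint.h>

#define LTR_REQ		(1u << 15)
#define LTR_SCALE_1US	(2u << 10)
#define LTR_SCALE_32US	(3u << 10)
#define LTR_VALUE_MASK	0x3ffu

/* Same value/scale scheme as intel_ltr_set(); 0 means "LTR disabled". */
static uint32_t encode_ltr(int32_t us)
{
	if (us < 0)
		return 0;
	if ((uint32_t)us > LTR_VALUE_MASK) {
		us >>= 5;			/* switch to 32 us units */
		if ((uint32_t)us > LTR_VALUE_MASK)
			us = LTR_VALUE_MASK;	/* clamp at the maximum */
		return LTR_REQ | LTR_SCALE_32US | (uint32_t)us;
	}
	return LTR_REQ | LTR_SCALE_1US | (uint32_t)us;
}

int main(void)
{
	printf("800 us  -> %#x\n", encode_ltr(800));	/* 1 us scale */
	printf("5000 us -> %#x\n", encode_ltr(5000));	/* 32 us scale, 156 units */
	return 0;
}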
+
+static int ufs_intel_device_reset(struct ufs_hba *hba)
+{
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	if (host->dsm_fns & INTEL_DSM_RESET) {
+		u32 result = 0;
+		int err;
+
+		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
+		if (!err && !result)
+			err = -EIO;
+		if (err)
+			dev_err(hba->dev, "%s: DSM error %d result %u\n",
+				__func__, err, result);
+		return err;
+	}
+
+	if (!host->reset_gpio)
+		return -EOPNOTSUPP;
+
+	gpiod_set_value_cansleep(host->reset_gpio, 1);
+	usleep_range(10, 15);
+
+	gpiod_set_value_cansleep(host->reset_gpio, 0);
+	usleep_range(10, 15);
+
+	return 0;
+}
+
+static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
+{
+	/* GPIO in _DSD has active low setting */
+	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+}
+
+static int ufs_intel_common_init(struct ufs_hba *hba)
+{
+	struct intel_host *host;
+
+	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+	ufshcd_set_variant(hba, host);
+	intel_dsm_init(host, hba->dev);
+	if (host->dsm_fns & INTEL_DSM_RESET) {
+		if (hba->vops->device_reset)
+			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+	} else {
+		if (hba->vops->device_reset)
+			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
+		if (IS_ERR(host->reset_gpio)) {
+			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
+				__func__, PTR_ERR(host->reset_gpio));
+			host->reset_gpio = NULL;
+		}
+		if (host->reset_gpio) {
+			gpiod_set_value_cansleep(host->reset_gpio, 0);
+			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+		}
+	}
+	intel_ltr_expose(hba->dev);
+	intel_add_debugfs(hba);
+	return 0;
+}
+
+static void ufs_intel_common_exit(struct ufs_hba *hba)
+{
+	intel_remove_debugfs(hba);
+	intel_ltr_hide(hba->dev);
+}
+
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+	if (ufshcd_is_link_hibern8(hba)) {
+		int ret = ufshcd_uic_hibern8_exit(hba);
+
+		if (!ret) {
+			ufshcd_set_link_active(hba);
+		} else {
+			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+				__func__, ret);
+			/*
+			 * Force reset and restore. Any other actions can lead
+			 * to an unrecoverable state.
+			 */
+			ufshcd_set_link_off(hba);
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_intel_ehl_init(struct ufs_hba *hba)
+{
+	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+	return ufs_intel_common_init(hba);
+}
+
+static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+{
+	/* LKF always needs a full reset, so set PM accordingly */
+	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
+		hba->spm_lvl = UFS_PM_LVL_6;
+		hba->rpm_lvl = UFS_PM_LVL_6;
+	} else {
+		hba->spm_lvl = UFS_PM_LVL_5;
+		hba->rpm_lvl = UFS_PM_LVL_5;
+	}
+}
+
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
+{
+	struct ufs_host *ufs_host;
+	int err;
+
+	hba->nop_out_timeout = 200;
+	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+	hba->caps |= UFSHCD_CAP_CRYPTO;
+	err = ufs_intel_common_init(hba);
+	ufs_host = ufshcd_get_variant(hba);
+	ufs_host->late_init = ufs_intel_lkf_late_init;
+	return err;
+}
+
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+	hba->nop_out_timeout = 200;
+	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+	return ufs_intel_common_init(hba);
+}
+
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+	return ufs_intel_common_init(hba);
+}
+
+static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
+	.name			= "intel-pci",
+	.init			= ufs_intel_common_init,
+	.exit			= ufs_intel_common_exit,
+	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.resume			= ufs_intel_resume,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
+	.name			= "intel-pci",
+	.init			= ufs_intel_ehl_init,
+	.exit			= ufs_intel_common_exit,
+	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.resume			= ufs_intel_resume,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
+	.name			= "intel-pci",
+	.init			= ufs_intel_lkf_init,
+	.exit			= ufs_intel_common_exit,
+	.hce_enable_notify	= ufs_intel_hce_enable_notify,
+	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
+	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
+	.resume			= ufs_intel_resume,
+	.device_reset		= ufs_intel_device_reset,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+	.name			= "intel-pci",
+	.init			= ufs_intel_adl_init,
+	.exit			= ufs_intel_common_exit,
+	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.resume			= ufs_intel_resume,
+	.device_reset		= ufs_intel_device_reset,
+};
+
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+	.name			= "intel-pci",
+	.init			= ufs_intel_mtl_init,
+	.exit			= ufs_intel_common_exit,
+	.hce_enable_notify	= ufs_intel_hce_enable_notify,
+	.link_startup_notify	= ufs_intel_link_startup_notify,
+	.resume			= ufs_intel_resume,
+	.device_reset		= ufs_intel_device_reset,
+};
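[The variant-ops tables above follow the usual kernel pattern of optional callbacks that the core invokes only when a variant provides them. A stripped-down user-space analogue of that dispatch, with all names illustrative:]

#include <stdio.h>

struct hba;

struct variant_ops {
	const char *name;
	int (*init)(struct hba *hba);	/* optional */
	void (*exit)(struct hba *hba);	/* optional */
};

struct hba {
	const struct variant_ops *vops;
};

/* Core-side call site: guard every optional callback before use. */
static int hba_variant_init(struct hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);
	return 0;	/* no-op when the variant does not provide one */
}

static int demo_init(struct hba *hba)
{
	printf("init for %s\n", hba->vops->name);
	return 0;
}

int main(void)
{
	struct variant_ops ops = { .name = "demo", .init = demo_init };
	struct hba hba = { .vops = &ops };

	return hba_variant_init(&hba);
}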
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	/* Force a full reset and restore */
+	ufshcd_set_link_off(hba);
+
+	return ufshcd_system_resume(dev);
+}
+#endif
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate the SCSI host and host memory data
+ * structures
+ * @pdev: pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+	struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	ufshcd_remove(hba);
+	ufshcd_dealloc_host(hba);
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ufs_host *ufs_host;
+	struct ufs_hba *hba;
+	void __iomem *mmio_base;
+	int err;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pcim_enable_device failed\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+	if (err < 0) {
+		dev_err(&pdev->dev, "request and iomap failed\n");
+		return err;
+	}
+
+	mmio_base = pcim_iomap_table(pdev)[0];
+
+	err = ufshcd_alloc_host(&pdev->dev, &hba);
+	if (err) {
+		dev_err(&pdev->dev, "Allocation failed\n");
+		return err;
+	}
+
+	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
+
+	err = ufshcd_init(hba, mmio_base, pdev->irq);
+	if (err) {
+		dev_err(&pdev->dev, "Initialization failed\n");
+		ufshcd_dealloc_host(hba);
+		return err;
+	}
+
+	ufs_host = ufshcd_get_variant(hba);
+	if (ufs_host && ufs_host->late_init)
+		ufs_host->late_init(hba);
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops ufshcd_pci_pm_ops = {
+	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ufshcd_system_suspend,
+	.resume		= ufshcd_system_resume,
+	.freeze		= ufshcd_system_suspend,
+	.thaw		= ufshcd_system_resume,
+	.poweroff	= ufshcd_system_suspend,
+	.restore	= ufshcd_pci_restore,
+	.prepare	= ufshcd_suspend_prepare,
+	.complete	= ufshcd_resume_complete,
+#endif
+};
+
+static const struct pci_device_id ufshcd_pci_tbl[] = {
+	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+	{ }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+	.name		= UFSHCD,
+	.id_table	= ufshcd_pci_tbl,
+	.probe		= ufshcd_pci_probe,
+	.remove		= ufshcd_pci_remove,
+	.shutdown	= ufshcd_pci_shutdown,
+	.driver	= {
+		.pm	= &ufshcd_pci_pm_ops
+	},
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
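[As the ID table above shows, each PCI match entry carries a pointer to its variant ops through driver_data (a kernel_ulong_t), and probe() casts it back. A minimal stand-alone illustration of that round trip, with hypothetical names:]

#include <stdio.h>

struct variant_ops {
	const char *name;
};

struct pci_id {
	unsigned int vendor, device;
	unsigned long driver_data;	/* plays the role of kernel_ulong_t */
};

static const struct variant_ops demo_vops = { .name = "demo-pci" };

static const struct pci_id id_table[] = {
	{ 0x8086, 0x9dfa, (unsigned long)&demo_vops },
	{ }	/* terminator */
};

int main(void)
{
	/* Probe side: recover the ops pointer from the matched entry. */
	const struct variant_ops *vops =
		(const struct variant_ops *)id_table[0].driver_data;

	printf("matched variant: %s\n", vops->name);
	return 0;
}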
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
new file mode 100644
index 000000000000..e7332cc65b1f
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *	Santosh Yaraganavi <santosh.sy@samsung.com>
+ *	Vinayak Holikatti <h.vinayak@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include <ufs/unipro.h>
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2
+
+static int ufshcd_parse_clock_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+	int cnt;
+	int i;
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	u32 *clkfreq = NULL;
+	struct ufs_clk_info *clki;
+	int len = 0;
+	size_t sz = 0;
+
+	if (!np)
+		goto out;
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (!cnt || (cnt == -EINVAL)) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+			 __func__);
+	} else if (cnt < 0) {
+		dev_err(dev, "%s: count clock strings failed, err %d\n",
+			__func__, cnt);
+		ret = cnt;
+	}
+
+	if (cnt <= 0)
+		goto out;
+
+	if (!of_get_property(np, "freq-table-hz", &len)) {
+		dev_info(dev, "freq-table-hz property not specified\n");
+		goto out;
+	}
+
+	if (len <= 0)
+		goto out;
+
+	sz = len / sizeof(*clkfreq);
+	if (sz != 2 * cnt) {
+		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
+			       GFP_KERNEL);
+	if (!clkfreq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = of_property_read_u32_array(np, "freq-table-hz",
+					 clkfreq, sz);
+	if (ret && (ret != -EINVAL)) {
+		dev_err(dev, "%s: error reading array %d\n",
+			"freq-table-hz", ret);
+		return ret;
+	}
+
+	for (i = 0; i < sz; i += 2) {
+		ret = of_property_read_string_index(np,
+				"clock-names", i/2, (const char **)&name);
+		if (ret)
+			goto out;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		clki->min_freq = clkfreq[i];
+		clki->max_freq = clkfreq[i+1];
+		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+		if (!clki->name) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (!strcmp(name, "ref_clk"))
+			clki->keep_link_active = true;
+		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
+			clki->min_freq, clki->max_freq, clki->name);
+		list_add_tail(&clki->list, &hba->clk_list_head);
+	}
+out:
+	return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int ufshcd_populate_vreg(struct device *dev, const char *name,
+				struct ufs_vreg **out_vreg)
+{
+	char prop_name[MAX_PROP_SIZE];
+	struct ufs_vreg *vreg = NULL;
+	struct device_node *np = dev->of_node;
+
+	if (!np) {
+		dev_err(dev, "%s: non DT initialization\n", __func__);
+		goto out;
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+			 __func__, prop_name);
+		goto out;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+	if (!vreg->name)
+		return -ENOMEM;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
+		vreg->max_uA = 0;
+	}
+out:
+	*out_vreg = vreg;
+	return 0;
+}
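[ufshcd_parse_clock_info() above expects "freq-table-hz" to carry one <min max> pair per "clock-names" entry, hence the sz != 2 * cnt check and the two-cells-at-a-time loop. A small sketch of that pairing, with made-up example values:]

#include <stdio.h>

int main(void)
{
	const char *clock_names[] = { "core_clk", "ref_clk" };
	const unsigned int freq_table[] = {
		/* min         max */
		100000000, 200000000,	/* core_clk */
		19200000,  19200000,	/* ref_clk */
	};
	const unsigned int cnt = 2;
	unsigned int i;

	/* Same length invariant as the driver's sz != 2 * cnt check. */
	if (sizeof(freq_table) / sizeof(freq_table[0]) != 2 * cnt) {
		fprintf(stderr, "freq-table-hz len mismatch\n");
		return 1;
	}
	for (i = 0; i < 2 * cnt; i += 2)
		printf("%s: min %u Hz, max %u Hz\n",
		       clock_names[i / 2], freq_table[i], freq_table[i + 1]);
	return 0;
}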
+
+/**
+ * ufshcd_parse_regulator_info - get regulator info from device tree
+ * @hba: per adapter instance
+ *
+ * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
+ * If any of the supplies are not defined, it is assumed that they are
+ * always-on and hence zero is returned. If a property is defined but
+ * parsing fails, the corresponding error is returned.
+ */
+static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
+{
+	int err;
+	struct device *dev = hba->dev;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+out:
+	return err;
+}
+
+void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
+
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+				   &hba->lanes_per_direction);
+	if (ret) {
+		dev_dbg(hba->dev,
+			"%s: failed to read lanes-per-direction, ret=%d\n",
+			__func__, ret);
+		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+	}
+}
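[ufshcd_populate_vreg() above derives its DT property names from the supply name with snprintf(): "vcc" yields "vcc-supply" and "vcc-max-microamp". A trivial demonstration of that naming, with supply names taken from ufshcd_parse_regulator_info() above:]

#include <stdio.h>

#define MAX_PROP_SIZE 32

int main(void)
{
	const char *supplies[] = { "vcc", "vccq", "vccq2" };
	char prop_name[MAX_PROP_SIZE];
	unsigned int i;

	for (i = 0; i < sizeof(supplies) / sizeof(supplies[0]); i++) {
		snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", supplies[i]);
		printf("%s ", prop_name);
		snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp",
			 supplies[i]);
		printf("%s\n", prop_name);
	}
	return 0;
}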
+
+/**
+ * ufshcd_get_pwr_dev_param - get the final agreed attributes for
+ * power mode change
+ * @pltfrm_param: pointer to platform parameters
+ * @dev_max: pointer to device attributes
+ * @agreed_pwr: returned agreed attributes
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
+			     struct ufs_pa_layer_attr *dev_max,
+			     struct ufs_pa_layer_attr *agreed_pwr)
+{
+	int min_pltfrm_gear;
+	int min_dev_gear;
+	bool is_dev_sup_hs = false;
+	bool is_pltfrm_max_hs = false;
+
+	if (dev_max->pwr_rx == FAST_MODE)
+		is_dev_sup_hs = true;
+
+	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
+		is_pltfrm_max_hs = true;
+		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
+					pltfrm_param->hs_tx_gear);
+	} else {
+		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
+					pltfrm_param->pwm_tx_gear);
+	}
+
+	/*
+	 * device doesn't support HS but
+	 * pltfrm_param->desired_working_mode is HS,
+	 * thus device and pltfrm_param don't agree
+	 */
+	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+		pr_info("%s: device doesn't support HS\n",
+			__func__);
+		return -ENOTSUPP;
+	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+		/*
+		 * since device supports HS, it supports FAST_MODE.
+		 * since pltfrm_param->desired_working_mode is also HS
+		 * then final decision (FAST/FASTAUTO) is done according
+		 * to pltfrm_params as it is the restricting factor
+		 */
+		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+	} else {
+		/*
+		 * here pltfrm_param->desired_working_mode is PWM.
+		 * it doesn't matter whether device supports HS or PWM,
+		 * in both cases pltfrm_param->desired_working_mode will
+		 * determine the mode
+		 */
+		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+	}
+
+	/*
+	 * we would like tx to work in the minimum number of lanes
+	 * between device capability and vendor preferences.
+	 * the same decision will be made for rx
+	 */
+	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+				    pltfrm_param->tx_lanes);
+	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+				    pltfrm_param->rx_lanes);
+
+	/* device maximum gear is the minimum between device rx and tx gears */
+	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+	/*
+	 * if the device capabilities and the vendor pre-defined preferences
+	 * are both HS or both PWM then set the minimum gear to be the chosen
+	 * working gear.
+	 * if one is PWM and one is HS then the one that is PWM gets to decide
+	 * the gear, as it is the one that also decided previously what
+	 * pwr the device will be configured to.
+	 */
+	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
+	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+		agreed_pwr->gear_rx =
+			min_t(u32, min_dev_gear, min_pltfrm_gear);
+	} else if (!is_dev_sup_hs) {
+		agreed_pwr->gear_rx = min_dev_gear;
+	} else {
+		agreed_pwr->gear_rx = min_pltfrm_gear;
+	}
+	agreed_pwr->gear_tx = agreed_pwr->gear_rx;
+
+	agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
+{
+	*dev_param = (struct ufs_dev_params){
+		.tx_lanes = 2,
+		.rx_lanes = 2,
+		.hs_rx_gear = UFS_HS_G3,
+		.hs_tx_gear = UFS_HS_G3,
+		.pwm_rx_gear = UFS_PWM_G4,
+		.pwm_tx_gear = UFS_PWM_G4,
+		.rx_pwr_pwm = SLOW_MODE,
+		.tx_pwr_pwm = SLOW_MODE,
+		.rx_pwr_hs = FAST_MODE,
+		.tx_pwr_hs = FAST_MODE,
+		.hs_rate = PA_HS_MODE_B,
+		.desired_working_mode = UFS_HS_MODE,
+	};
+}
+EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
+
+/**
+ * ufshcd_pltfrm_init - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ * @vops: pointer to variant ops
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_pltfrm_init(struct platform_device *pdev,
+		       const struct ufs_hba_variant_ops *vops)
+{
+	struct ufs_hba *hba;
+	void __iomem *mmio_base;
+	int irq, err;
+	struct device *dev = &pdev->dev;
+
+	mmio_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(mmio_base)) {
+		err = PTR_ERR(mmio_base);
+		goto out;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		err = irq;
+		goto out;
+	}
+
+	err = ufshcd_alloc_host(dev, &hba);
+	if (err) {
+		dev_err(dev, "Allocation failed\n");
+		goto out;
+	}
+
+	hba->vops = vops;
+
+	err = ufshcd_parse_clock_info(hba);
+	if (err) {
+		dev_err(dev, "%s: clock parse failed %d\n",
+			__func__, err);
+		goto dealloc_host;
+	}
+	err = ufshcd_parse_regulator_info(hba);
+	if (err) {
+		dev_err(dev, "%s: regulator init failed %d\n",
+			__func__, err);
+		goto dealloc_host;
+	}
+
+	ufshcd_init_lanes_per_dir(hba);
+
+	err = ufshcd_init(hba, mmio_base, irq);
+	if (err) {
+		dev_err(dev, "Initialization failed\n");
+		goto dealloc_host;
+	}
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+dealloc_host:
+	ufshcd_dealloc_host(hba);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+
+MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_LICENSE("GPL");
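[The mode/gear agreement in ufshcd_get_pwr_dev_param() above reduces to: HS requires both sides to support it, and in the surviving mixed case the PWM side dictates the gear. A simplified sketch of just that reduction, using two flags instead of the full parameter structs:]

#include <stdio.h>

/* Returns the agreed gear, or -1 when the two sides cannot agree. */
static int agreed_gear(int dev_hs, int pltfrm_hs,
		       int min_dev_gear, int min_pltfrm_gear)
{
	if (!dev_hs && pltfrm_hs)
		return -1;	/* no agreement, like -ENOTSUPP above */
	if (dev_hs == pltfrm_hs)	/* both HS or both PWM */
		return min_dev_gear < min_pltfrm_gear ?
			min_dev_gear : min_pltfrm_gear;
	/* device is HS-capable but the platform insists on PWM: it decides */
	return min_pltfrm_gear;
}

int main(void)
{
	printf("both HS, gears 4 vs 3  -> G%d\n", agreed_gear(1, 1, 4, 3));
	printf("device HS, platform PWM -> G%d\n", agreed_gear(1, 0, 4, 3));
	printf("device PWM, platform HS -> %d\n", agreed_gear(0, 1, 4, 3));
	return 0;
}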
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
new file mode 100644
index 000000000000..43c2e412bd99
--- /dev/null
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef UFSHCD_PLTFRM_H_
+#define UFSHCD_PLTFRM_H_
+
+#include <ufs/ufshcd.h>
+
+#define UFS_PWM_MODE 1
+#define UFS_HS_MODE  2
+
+struct ufs_dev_params {
+	u32 pwm_rx_gear;	/* pwm rx gear to work in */
+	u32 pwm_tx_gear;	/* pwm tx gear to work in */
+	u32 hs_rx_gear;		/* hs rx gear to work in */
+	u32 hs_tx_gear;		/* hs tx gear to work in */
+	u32 rx_lanes;		/* number of rx lanes */
+	u32 tx_lanes;		/* number of tx lanes */
+	u32 rx_pwr_pwm;		/* rx pwm working pwr */
+	u32 tx_pwr_pwm;		/* tx pwm working pwr */
+	u32 rx_pwr_hs;		/* rx hs working pwr */
+	u32 tx_pwr_hs;		/* tx hs working pwr */
+	u32 hs_rate;		/* rate A/B to work in HS */
+	u32 desired_working_mode;
+};
+
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
+			     struct ufs_pa_layer_attr *dev_max,
+			     struct ufs_pa_layer_attr *agreed_pwr);
+void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
+int ufshcd_pltfrm_init(struct platform_device *pdev,
+		       const struct ufs_hba_variant_ops *vops);
+void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
+
+#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/ufs/host/ufshci-dwc.h b/drivers/ufs/host/ufshci-dwc.h
new file mode 100644
index 000000000000..6c290e272106
--- /dev/null
+++ b/drivers/ufs/host/ufshci-dwc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ */
+
+#ifndef _UFSHCI_DWC_H
+#define _UFSHCI_DWC_H
+
+/* DWC HC UFSHCI specific Registers */
+enum dwc_specific_registers {
+	DWC_UFS_REG_HCLKDIV	= 0xFC,
+};
+
+/* Clock Divider Values: Hex equivalent of frequency in MHz */
+enum clk_div_values {
+	DWC_UFS_REG_HCLKDIV_DIV_62_5	= 0x3e,
+	DWC_UFS_REG_HCLKDIV_DIV_125	= 0x7d,
+	DWC_UFS_REG_HCLKDIV_DIV_200	= 0xc8,
+};
+
+/* Selector Index */
+enum selector_index {
+	SELIND_LN0_TX	= 0x00,
+	SELIND_LN1_TX	= 0x01,
+	SELIND_LN0_RX	= 0x04,
+	SELIND_LN1_RX	= 0x05,
+};
+
+#endif /* End of Header */
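[Per the comment above, the DWC HCLKDIV divider constants are simply the bus frequency in MHz written in hex (0x7d == 125, 0xc8 == 200; 62.5 MHz truncates to 0x3e == 62). A one-line check of that correspondence:]

#include <stdio.h>

int main(void)
{
	/* The register value equals the integer frequency in MHz. */
	const unsigned int freqs_mhz[] = { 62, 125, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(freqs_mhz) / sizeof(freqs_mhz[0]); i++)
		printf("%3u MHz -> HCLKDIV %#x\n", freqs_mhz[i], freqs_mhz[i]);
	return 0;
}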