Diffstat (limited to 'include/ufs/ufshcd.h')
-rw-r--r--  include/ufs/ufshcd.h  197
1 file changed, 190 insertions(+), 7 deletions(-)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 5cf81dff60aa..25aab8ec4f86 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,7 +16,9 @@
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
+#include <linux/msi.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
@@ -30,6 +32,7 @@ struct ufs_hba;
enum dev_cmd_type {
DEV_CMD_TYPE_NOP = 0x0,
DEV_CMD_TYPE_QUERY = 0x1,
+ DEV_CMD_TYPE_RPMB = 0x2,
};
enum ufs_event_type {
@@ -222,6 +225,7 @@ struct ufs_dev_cmd {
struct mutex lock;
struct completion *complete;
struct ufs_query query;
+ struct cq_entry *cqe;
};
/**
@@ -297,6 +301,12 @@ struct ufs_pwr_mode_info {
* @config_scaling_param: called to configure clock scaling parameters
* @program_key: program or evict an inline encryption key
* @event_notify: called to notify important events
+ * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
+ * @mcq_config_resource: called to configure MCQ platform resources
+ * @get_hba_mac: called to get the vendor-specific MAC (max active commands) value, mandatory for MCQ mode
+ * @op_runtime_config: called to configure the operation and runtime register pointers
+ * @get_outstanding_cqs: called to get outstanding completion queues
+ * @config_esi: called to configure the Event Specific Interrupt (ESI)
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -335,6 +345,13 @@ struct ufs_hba_variant_ops {
const union ufs_crypto_cfg_entry *cfg, int slot);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
+ void (*reinit_notify)(struct ufs_hba *hba);
+ int (*mcq_config_resource)(struct ufs_hba *hba);
+ int (*get_hba_mac)(struct ufs_hba *hba);
+ int (*op_runtime_config)(struct ufs_hba *hba);
+ int (*get_outstanding_cqs)(struct ufs_hba *hba,
+ unsigned long *ocqs);
+ int (*config_esi)(struct ufs_hba *hba);
};
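
The new MCQ hooks are optional apart from get_hba_mac, which the doc comment above marks as mandatory for MCQ mode. A minimal sketch of how a hypothetical platform driver might wire them up (all names and the MAC value below are illustrative, not part of this patch):

static int example_get_hba_mac(struct ufs_hba *hba)
{
	/* Report the controller's Max Active Commands for MCQ mode. */
	return 64;
}

static int example_op_runtime_config(struct ufs_hba *hba)
{
	/* Fill in hba->mcq_opr[] bases and strides from vendor registers. */
	return 0;
}

static const struct ufs_hba_variant_ops example_vops = {
	.name			= "example",
	.get_hba_mac		= example_get_hba_mac,
	.op_runtime_config	= example_op_runtime_config,
	/* .mcq_config_resource, .get_outstanding_cqs and .config_esi as needed */
};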
/* clock gating state */
@@ -566,9 +583,9 @@ enum ufshcd_quirks {
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
/*
- * This quirk allows only sg entries aligned with page size.
+ * Align DMA SG entries on a 4 KiB boundary.
*/
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14,
/*
* This quirk needs to be enabled if the host controller does not
@@ -593,6 +610,12 @@ enum ufshcd_quirks {
* auto-hibernate capability but it's FASTAUTO only.
*/
UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
+
+ /*
+ * This quirk needs to be enabled if the host controller needs
+ * to reinit the device after switching to maximum gear.
+ */
+ UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19,
};
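
Host drivers opt into the new behaviour by setting the flag in hba->quirks, the existing quirks bitmask in struct ufs_hba; a hedged sketch with an illustrative init function name:

static int example_hba_init(struct ufs_hba *hba)
{
	/* Re-initialize the UFS device after switching to the maximum gear. */
	hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;

	/* Renamed quirk: the controller requires 4 KiB aligned DMA SG entries. */
	hba->quirks |= UFSHCD_QUIRK_4KB_DMA_ALIGNMENT;
	return 0;
}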
enum ufshcd_caps {
@@ -724,6 +747,51 @@ struct ufs_hba_monitor {
};
/**
+ * struct ufshcd_res_info - MCQ related resource regions
+ *
+ * @name: resource name
+ * @resource: pointer to resource region
+ * @base: register base address
+ */
+struct ufshcd_res_info {
+ const char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+enum ufshcd_res {
+ RES_UFS,
+ RES_MCQ,
+ RES_MCQ_SQD,
+ RES_MCQ_SQIS,
+ RES_MCQ_CQD,
+ RES_MCQ_CQIS,
+ RES_MCQ_VS,
+ RES_MAX,
+};
+
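hba->res[] is indexed by enum ufshcd_res, so a platform's mcq_config_resource hook can look each region up by name and map it. A rough sketch, assuming the name fields have already been initialized and that the regions are platform MMIO resources (error handling is simplified; real implementations treat several regions as optional):

static int example_mcq_config_resource(struct ufs_hba *hba)
{
	struct platform_device *pdev = to_platform_device(hba->dev);
	struct ufshcd_res_info *res;
	int i;

	for (i = RES_UFS; i < RES_MAX; i++) {
		res = &hba->res[i];
		res->resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     res->name);
		if (!res->resource)
			continue;	/* region not provided by this platform */
		res->base = devm_ioremap_resource(hba->dev, res->resource);
		if (IS_ERR(res->base))
			return PTR_ERR(res->base);
	}

	hba->mcq_base = hba->res[RES_MCQ].base;
	return 0;
}
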
+/**
+ * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
+ *
+ * @offset: Doorbell Address Offset
+ * @stride: register address stride between consecutive queues [0...31]
+ * @base: base address
+ */
+struct ufshcd_mcq_opr_info_t {
+ unsigned long offset;
+ unsigned long stride;
+ void __iomem *base;
+};
+
+enum ufshcd_mcq_opr {
+ OPR_SQD,
+ OPR_SQIS,
+ OPR_CQD,
+ OPR_CQIS,
+ OPR_MAX,
+};
+
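Once op_runtime_config has filled in mcq_opr[], a queue's operation/runtime register block sits at base + id * stride; a small illustrative helper:

static inline void __iomem *example_mcq_opr_base(struct ufs_hba *hba,
						 enum ufshcd_mcq_opr opr,
						 int qid)
{
	struct ufshcd_mcq_opr_info_t *p = &hba->mcq_opr[opr];

	/* Register blocks for consecutive queues are 'stride' bytes apart. */
	return p->base + p->stride * qid;
}
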
+/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
* @ucdl_base_addr: UFS Command Descriptor base address
@@ -747,6 +815,7 @@ struct ufs_hba_monitor {
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
+ * @mcq_capabilities: UFS Multi Circular Queue capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
@@ -754,6 +823,7 @@ struct ufs_hba_monitor {
* @vops: pointer to variant specific operations
* @vps: pointer to variant specific parameters
* @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
* @irq: Irq number of the controller
* @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
* @dev_ref_clk_freq: reference clock frequency
@@ -808,6 +878,7 @@ struct ufs_hba_monitor {
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
* @clk_scaling_lock: used to serialize device commands and clock scaling
* @desc_size: descriptor sizes reported by device
* @scsi_block_reqs_cnt: reference counting for scsi block requests
@@ -828,8 +899,17 @@ struct ufs_hba_monitor {
* ee_ctrl_mask
* @luns_avail: number of regular and well known LUNs supported by the UFS
* device
+ * @nr_hw_queues: number of hardware queues configured
+ * @nr_queues: number of queues configured for each queue type
* @complete_put: whether or not to call ufshcd_rpm_put() from inside
* ufshcd_resume_complete()
+ * @ext_iid_sup: whether EXT_IID is supported by the UFSHC
+ * @mcq_sup: whether MCQ is supported by the UFSHC
+ * @mcq_enabled: whether MCQ is ready to accept requests
+ * @res: array of resource info of MCQ registers
+ * @mcq_base: Multi circular queue registers base address
+ * @uhq: array of supported hardware queues
+ * @dev_cmd_queue: Queue for issuing device management commands
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -871,12 +951,16 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
+ u32 mcq_capabilities;
int nutmrs;
u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
void *priv;
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ size_t sg_entry_size;
+#endif
unsigned int irq;
bool is_irq_enabled;
enum ufs_ref_clk_freq dev_ref_clk_freq;
@@ -895,6 +979,7 @@ struct ufs_hba {
struct completion *uic_async_done;
enum ufshcd_state ufshcd_state;
+ bool logical_unit_scan_finished;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
@@ -951,8 +1036,8 @@ struct ufs_hba {
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
+ struct mutex wb_mutex;
struct rw_semaphore clk_scaling_lock;
- unsigned char desc_size[QUERY_DESC_IDN_MAX];
atomic_t scsi_block_reqs_cnt;
struct device bsg_dev;
@@ -977,9 +1062,88 @@ struct ufs_hba {
u32 debugfs_ee_rate_limit_ms;
#endif
u32 luns_avail;
+ unsigned int nr_hw_queues;
+ unsigned int nr_queues[HCTX_MAX_TYPES];
bool complete_put;
+ bool ext_iid_sup;
+ bool scsi_host_added;
+ bool mcq_sup;
+ bool mcq_enabled;
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
+ struct ufs_hw_queue *uhq;
+ struct ufs_hw_queue *dev_cmd_queue;
+ struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+};
+
+/**
+ * struct ufs_hw_queue - per hardware queue structure
+ * @mcq_sq_head: base address of submission queue head pointer
+ * @mcq_sq_tail: base address of submission queue tail pointer
+ * @mcq_cq_head: base address of completion queue head pointer
+ * @mcq_cq_tail: base address of completion queue tail pointer
+ * @sqe_base_addr: submission queue entry base address
+ * @sqe_dma_addr: submission queue dma address
+ * @cqe_base_addr: completion queue base address
+ * @cqe_dma_addr: completion queue dma address
+ * @max_entries: max number of slots in this hardware queue
+ * @id: hardware queue ID
+ * @sq_tail_slot: current slot to which SQ tail pointer is pointing
+ * @sq_lock: serialize submission queue access
+ * @cq_tail_slot: current slot to which CQ tail pointer is pointing
+ * @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
+ */
+struct ufs_hw_queue {
+ void __iomem *mcq_sq_head;
+ void __iomem *mcq_sq_tail;
+ void __iomem *mcq_cq_head;
+ void __iomem *mcq_cq_tail;
+
+ void *sqe_base_addr;
+ dma_addr_t sqe_dma_addr;
+ struct cq_entry *cqe_base_addr;
+ dma_addr_t cqe_dma_addr;
+ u32 max_entries;
+ u32 id;
+ u32 sq_tail_slot;
+ spinlock_t sq_lock;
+ u32 cq_tail_slot;
+ u32 cq_head_slot;
+ spinlock_t cq_lock;
};
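
Submission in MCQ mode copies the UTP transfer request descriptor into the current tail slot and then rings the SQ tail doorbell with the new byte offset; a hedged sketch of that sequence (the real driver also handles wrap-around with a power-of-two mask and updates statistics):

static void example_mcq_submit(struct ufs_hba *hba, struct ufs_hw_queue *hwq,
			       const struct utp_transfer_req_desc *sqe)
{
	unsigned long flags;
	void *slot;

	spin_lock_irqsave(&hwq->sq_lock, flags);

	slot = hwq->sqe_base_addr +
	       hwq->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
	memcpy(slot, sqe, sizeof(*sqe));

	/* Advance the tail slot and publish it via the SQ tail doorbell. */
	hwq->sq_tail_slot = (hwq->sq_tail_slot + 1) % hwq->max_entries;
	writel(hwq->sq_tail_slot * sizeof(struct utp_transfer_req_desc),
	       hwq->mcq_sq_tail);

	spin_unlock_irqrestore(&hwq->sq_lock, flags);
}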
+static inline bool is_mcq_enabled(struct ufs_hba *hba)
+{
+ return hba->mcq_enabled;
+}
+
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return hba->sg_entry_size;
+}
+
+static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
+{
+ WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
+ hba->sg_entry_size = sg_entry_size;
+}
+#else
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+ return sizeof(struct ufshcd_sg_entry);
+}
+
+#define ufshcd_set_sg_entry_size(hba, sg_entry_size) \
+ ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
+#endif
+
+static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
+{
+ return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
+}
+
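Because the per-command UTP command descriptor now has a variant-dependent size, code that walks the descriptor array must scale by sizeof_utp_transfer_cmd_desc() rather than sizeof(); a minimal sketch of indexing the i-th descriptor:

static inline struct utp_transfer_cmd_desc *
example_cmd_desc(struct ufs_hba *hba, int i)
{
	/* ucdl_base_addr holds hba->nutrs descriptors of this variable size. */
	return (void *)hba->ucdl_base_addr + i * sizeof_utp_transfer_cmd_desc(hba);
}
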
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
@@ -1035,6 +1199,16 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
}
+#define ufsmcq_writel(hba, val, reg) \
+ writel((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readl(hba, reg) \
+ readl((hba)->mcq_base + (reg))
+
+#define ufsmcq_writelx(hba, val, reg) \
+ writel_relaxed((val), (hba)->mcq_base + (reg))
+#define ufsmcq_readlx(hba, reg) \
+ readl_relaxed((hba)->mcq_base + (reg))
+
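These accessors mirror ufshcd_writel()/ufshcd_readl() below but target the MCQ register space at hba->mcq_base; a tiny illustrative use, where EXAMPLE_MCQ_SQ_ATTR is a made-up offset rather than a register defined by this patch:

#define EXAMPLE_MCQ_SQ_ATTR	0x0	/* hypothetical per-queue attribute register */

static void example_program_sq_depth(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	/* Relaxed write: ordering is not needed while the queue is still disabled. */
	ufsmcq_writelx(hba, hwq->max_entries - 1, EXAMPLE_MCQ_SQ_ATTR);
}
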
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -1071,6 +1245,11 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
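
A per-queue ESI handler would typically acknowledge the completion interrupt and then drain the queue with the nolock poll helper (acceptable here because ESI delivers one interrupt per queue); a hedged sketch where struct example_esi_ctx is hypothetical glue carrying the hba/queue pair:

struct example_esi_ctx {
	struct ufs_hba *hba;
	struct ufs_hw_queue *hwq;
};

static irqreturn_t example_mcq_esi_handler(int irq, void *data)
{
	struct example_esi_ctx *ctx = data;

	/* Clear the CQ interrupt status for this queue, then reap new entries. */
	ufshcd_mcq_write_cqis(ctx->hba, 0x1, ctx->hwq->id);
	ufshcd_mcq_poll_cqe_nolock(ctx->hba, ctx->hwq);

	return IRQ_HANDLED;
}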
/**
* ufshcd_set_variant - set variant specific data to the hba
@@ -1100,8 +1279,12 @@ extern int ufshcd_runtime_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
+extern int ufshcd_system_freeze(struct device *dev);
+extern int ufshcd_system_thaw(struct device *dev);
+extern int ufshcd_system_restore(struct device *dev);
#endif
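
The freeze/thaw/restore hooks let platform glue support hibernation alongside the existing suspend/resume paths; a hedged sketch of how a driver might reference them from its dev_pm_ops (standard kernel PM plumbing, not shown in this patch):

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
	.freeze		 = ufshcd_system_freeze,
	.thaw		 = ufshcd_system_thaw,
	.restore	 = ufshcd_system_restore,
};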
extern int ufshcd_shutdown(struct ufs_hba *hba);
+
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
int adapt_val);
@@ -1186,9 +1369,6 @@ void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
@@ -1201,7 +1381,10 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
-
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
+ struct ufs_ehs *ehs_rsp, int sg_cnt,
+ struct scatterlist *sg_list, enum dma_data_direction dir);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);