Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig | 4
-rw-r--r--  drivers/misc/ad525x_dpot.c | 2
-rw-r--r--  drivers/misc/atmel-ssc.c | 24
-rw-r--r--  drivers/misc/c2port/core.c | 2
-rw-r--r--  drivers/misc/cardreader/Makefile | 2
-rw-r--r--  drivers/misc/cardreader/rtl8411.c | 8
-rw-r--r--  drivers/misc/cardreader/rts5209.c | 5
-rw-r--r--  drivers/misc/cardreader/rts5227.c | 5
-rw-r--r--  drivers/misc/cardreader/rts5228.c | 747
-rw-r--r--  drivers/misc/cardreader/rts5228.h | 168
-rw-r--r--  drivers/misc/cardreader/rts5229.c | 5
-rw-r--r--  drivers/misc/cardreader/rts5249.c | 28
-rw-r--r--  drivers/misc/cardreader/rts5260.c | 23
-rw-r--r--  drivers/misc/cardreader/rts5261.c | 32
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 129
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.h | 5
-rw-r--r--  drivers/misc/cardreader/rtsx_usb.c | 2
-rw-r--r--  drivers/misc/cb710/core.c | 28
-rw-r--r--  drivers/misc/cb710/sgbuf2.c | 1
-rw-r--r--  drivers/misc/cxl/flash.c | 4
-rw-r--r--  drivers/misc/cxl/hcalls.c | 42
-rw-r--r--  drivers/misc/cxl/sysfs.c | 2
-rw-r--r--  drivers/misc/cxl/vphb.c | 4
-rw-r--r--  drivers/misc/echo/echo.c | 6
-rw-r--r--  drivers/misc/eeprom/at24.c | 2
-rw-r--r--  drivers/misc/eeprom/eeprom_93cx6.c | 4
-rw-r--r--  drivers/misc/enclosure.c | 8
-rw-r--r--  drivers/misc/genwqe/card_base.c | 32
-rw-r--r--  drivers/misc/genwqe/card_ddcb.c | 20
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 2
-rw-r--r--  drivers/misc/genwqe/card_dev.c | 24
-rw-r--r--  drivers/misc/genwqe/card_sysfs.c | 8
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 30
-rw-r--r--  drivers/misc/habanalabs/Makefile | 11
-rw-r--r--  drivers/misc/habanalabs/common/Makefile | 7
-rw-r--r--  drivers/misc/habanalabs/common/asid.c (renamed from drivers/misc/habanalabs/asid.c) | 0
-rw-r--r--  drivers/misc/habanalabs/common/command_buffer.c (renamed from drivers/misc/habanalabs/command_buffer.c) | 82
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c (renamed from drivers/misc/habanalabs/command_submission.c) | 113
-rw-r--r--  drivers/misc/habanalabs/common/context.c (renamed from drivers/misc/habanalabs/context.c) | 39
-rw-r--r--  drivers/misc/habanalabs/common/debugfs.c (renamed from drivers/misc/habanalabs/debugfs.c) | 29
-rw-r--r--  drivers/misc/habanalabs/common/device.c (renamed from drivers/misc/habanalabs/device.c) | 90
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c (renamed from drivers/misc/habanalabs/firmware_if.c) | 114
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h (renamed from drivers/misc/habanalabs/habanalabs.h) | 191
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c (renamed from drivers/misc/habanalabs/habanalabs_drv.c) | 3
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c (renamed from drivers/misc/habanalabs/habanalabs_ioctl.c) | 24
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c (renamed from drivers/misc/habanalabs/hw_queue.c) | 165
-rw-r--r--  drivers/misc/habanalabs/common/hwmon.c (renamed from drivers/misc/habanalabs/hwmon.c) | 19
-rw-r--r--  drivers/misc/habanalabs/common/irq.c (renamed from drivers/misc/habanalabs/irq.c) | 38
-rw-r--r--  drivers/misc/habanalabs/common/memory.c (renamed from drivers/misc/habanalabs/memory.c) | 5
-rw-r--r--  drivers/misc/habanalabs/common/mmu.c (renamed from drivers/misc/habanalabs/mmu.c) | 3
-rw-r--r--  drivers/misc/habanalabs/common/pci.c (renamed from drivers/misc/habanalabs/pci.c) | 151
-rw-r--r--  drivers/misc/habanalabs/common/sysfs.c (renamed from drivers/misc/habanalabs/sysfs.c) | 14
-rw-r--r--  drivers/misc/habanalabs/gaudi/Makefile | 2
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 1069
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudiP.h | 27
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 12
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c | 2
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_security.c | 5
-rw-r--r--  drivers/misc/habanalabs/goya/Makefile | 2
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 216
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 24
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c | 15
-rw-r--r--  drivers/misc/habanalabs/goya/goya_security.c | 2
-rw-r--r--  drivers/misc/habanalabs/include/common/armcp_if.h (renamed from drivers/misc/habanalabs/include/armcp_if.h) | 14
-rw-r--r--  drivers/misc/habanalabs/include/common/hl_boot_if.h (renamed from drivers/misc/habanalabs/include/hl_boot_if.h) | 14
-rw-r--r--  drivers/misc/habanalabs/include/common/qman_if.h (renamed from drivers/misc/habanalabs/include/qman_if.h) | 0
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 21
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h | 114
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_masks.h | 3
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/gaudi_packets.h | 7
-rw-r--r--  drivers/misc/hpilo.c | 11
-rw-r--r--  drivers/misc/hpilo.h | 22
-rw-r--r--  drivers/misc/ibmasm/command.c | 6
-rw-r--r--  drivers/misc/ibmasm/dot_command.c | 6
-rw-r--r--  drivers/misc/ibmasm/event.c | 4
-rw-r--r--  drivers/misc/ibmasm/r_heartbeat.c | 2
-rw-r--r--  drivers/misc/ibmvmc.c | 6
-rw-r--r--  drivers/misc/lattice-ecp3-config.c | 19
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 53
-rw-r--r--  drivers/misc/lkdtm/heap.c | 9
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 2
-rw-r--r--  drivers/misc/lkdtm/perms.c | 22
-rw-r--r--  drivers/misc/lkdtm/usercopy.c | 7
-rw-r--r--  drivers/misc/mei/Kconfig | 2
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 23
-rw-r--r--  drivers/misc/mei/bus.c | 5
-rw-r--r--  drivers/misc/mei/client.c | 8
-rw-r--r--  drivers/misc/mei/hbm.c | 74
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.c | 4
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.h | 2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 7
-rw-r--r--  drivers/misc/mei/hw-me.c | 124
-rw-r--r--  drivers/misc/mei/hw-me.h | 24
-rw-r--r--  drivers/misc/mei/hw-txe.c | 5
-rw-r--r--  drivers/misc/mei/hw.h | 8
-rw-r--r--  drivers/misc/mei/main.c | 31
-rw-r--r--  drivers/misc/mei/mei_dev.h | 4
-rw-r--r--  drivers/misc/mei/pci-me.c | 25
-rw-r--r--  drivers/misc/mic/Kconfig | 4
-rw-r--r--  drivers/misc/mic/card/mic_debugfs.c | 10
-rw-r--r--  drivers/misc/mic/cosm/cosm_debugfs.c | 4
-rw-r--r--  drivers/misc/mic/cosm/cosm_main.c | 1
-rw-r--r--  drivers/misc/mic/host/mic_debugfs.c | 8
-rw-r--r--  drivers/misc/mic/host/mic_intr.c | 4
-rw-r--r--  drivers/misc/mic/host/mic_main.c | 1
-rw-r--r--  drivers/misc/mic/host/mic_x100.c | 13
-rw-r--r--  drivers/misc/mic/host/mic_x100.h | 9
-rw-r--r--  drivers/misc/mic/scif/scif_api.c | 6
-rw-r--r--  drivers/misc/mic/scif/scif_dma.c | 3
-rw-r--r--  drivers/misc/mic/scif/scif_epd.c | 9
-rw-r--r--  drivers/misc/mic/scif/scif_fence.c | 34
-rw-r--r--  drivers/misc/mic/scif/scif_nm.c | 17
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 18
-rw-r--r--  drivers/misc/mic/scif/scif_ports.c | 9
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 12
-rw-r--r--  drivers/misc/mic/vop/vop_main.c | 9
-rw-r--r--  drivers/misc/pch_phub.c | 57
-rw-r--r--  drivers/misc/phantom.c | 20
-rw-r--r--  drivers/misc/pti.c | 16
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 1
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 1
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c | 4
-rw-r--r--  drivers/misc/sram-exec.c | 2
-rw-r--r--  drivers/misc/ti-st/st_core.c | 79
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 71
-rw-r--r--  drivers/misc/tifm_7xx1.c | 30
-rw-r--r--  drivers/misc/uacce/uacce.c | 9
128 files changed, 3273 insertions, 1759 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e1b1ba5e2b92..ce136d685d14 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -24,7 +24,7 @@ config AD525X_DPOT
AD5271, AD5272, AD5274
digital potentiometer chips.
- See Documentation/misc-devices/ad525x_dpot.txt for the
+ See Documentation/misc-devices/ad525x_dpot.rst for the
userspace interface.
This driver can also be built as a module. If so, the module
@@ -83,7 +83,7 @@ config IBM_ASM
WARNING: This software may not be supported or function
correctly on your IBM server. Please consult the IBM ServerProven
- website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+ website <https://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
for information on the specific driver level and support statement
for your IBM server.
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index ccce3226a571..6f164522b028 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -58,7 +58,7 @@
* AD5272 1 1024 20, 50, 100 (50-TP)
* AD5274 1 256 20, 50, 100 (50-TP)
*
- * See Documentation/misc-devices/ad525x_dpot.txt for more info.
+ * See Documentation/misc-devices/ad525x_dpot.rst for more info.
*
* derived from ad5258.c
* Copyright (c) 2009 Cyber Switching, Inc.
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index ab4144ea1f11..d6cd5537126c 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -20,7 +20,7 @@
#include "../../sound/soc/atmel/atmel_ssc_dai.h"
/* Serialize access to ssc_list and user count */
-static DEFINE_SPINLOCK(user_lock);
+static DEFINE_MUTEX(user_lock);
static LIST_HEAD(ssc_list);
struct ssc_device *ssc_request(unsigned int ssc_num)
@@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
int ssc_valid = 0;
struct ssc_device *ssc;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
@@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
}
if (!ssc_valid) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
return ERR_PTR(-ENODEV);
}
if (ssc->user) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
dev_dbg(&ssc->pdev->dev, "module busy\n");
return ERR_PTR(-EBUSY);
}
ssc->user++;
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
clk_prepare(ssc->clk);
@@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc)
{
bool disable_clk = true;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
if (ssc->user)
ssc->user--;
else {
disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
if (disable_clk)
clk_unprepare(ssc->clk);
@@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev)
return -ENXIO;
}
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_add_tail(&ssc->list, &ssc_list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
platform_set_drvdata(pdev, ssc);
@@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev)
ssc_sound_dai_remove(ssc);
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_del(&ssc->list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
return 0;
}
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 33bba1802289..80d87e8a0bea 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -923,7 +923,7 @@ struct c2port_device *c2port_device_register(char *name,
}
dev_set_drvdata(c2dev->dev, c2dev);
- strncpy(c2dev->name, name, C2PORT_NAME_LEN);
+ strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index 1f56267ed2f4..895128475d83 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index 489ebe907688..a07674ed0596 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -37,10 +37,11 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg1 = 0;
u8 reg3 = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
if (!rtsx_vendor_setting_valid(reg1))
@@ -52,16 +53,17 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
- rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+ pci_read_config_byte(pdev, PCR_SETTING_REG3, &reg3);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
}
static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 659056164b21..39a6a7ecc32e 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -23,9 +23,10 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (rts5209_vendor_setting1_valid(reg)) {
@@ -34,7 +35,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->aspm_en = rts5209_reg_to_aspm(reg);
}
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rts5209_vendor_setting2_valid(reg)) {
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 3a9467aaa435..f5f392ddf3d6 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -56,9 +56,10 @@ static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -69,7 +70,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
new file mode 100644
index 000000000000..28feab1449ab
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5228.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0xB5, 0xB5, 0xB5},
+ {0xE6, 0x7E, 0xFE},
+ {0x6B, 0x6B, 0x6B},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ u32 reg;
+
+ /* 0x724~0x727 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+
+ /* 0x814~0x817 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40);
+}
+
+static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5228_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5228_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1,
+ RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART);
+ mdelay(2);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON);
+
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5228_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL,
+ RTS5228_PUPDC, RTS5228_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5228_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5228_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5228_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5228_stop_cmd(pcr);
+ rts5228_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static void rts5228_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+}
+
+static void rts5228_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+}
+
+static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5228_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO_POWERON_MASK, 0);
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5228_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_LMT_THD_MASK,
+ RTS5228_LDO1_LMT_THD_1500);
+
+ rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5228_enable_ocp(pcr);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(1000);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+}
+
+static void rts5228_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5228_clear_ocpstat(pcr);
+ rts5228_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ pcr->ocp_stat = 0;
+ }
+
+}
+
+static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if (0 == (lval & 0x0F))
+ rtsx_pci_enable_oobs_polling(pcr);
+ else
+ rtsx_pci_disable_oobs_polling(pcr);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5228_init_from_cfg(pcr);
+
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5228_fill_driving(pcr, OUTPUT_3V3);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ return 0;
+}
+
+static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ mdelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5228_enable_aspm(pcr, true);
+ else
+ rts5228_disable_aspm(pcr, false);
+}
+
+static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5228_pcr_ops = {
+ .fetch_vendor_settings = rtsx5228_fetch_vendor_settings,
+ .turn_on_led = rts5228_turn_on_led,
+ .turn_off_led = rts5228_turn_off_led,
+ .extra_init_hw = rts5228_extra_init_hw,
+ .enable_auto_blink = rts5228_enable_auto_blink,
+ .disable_auto_blink = rts5228_disable_auto_blink,
+ .card_power_on = rts5228_card_power_on,
+ .card_power_off = rts5228_card_power_off,
+ .switch_output_voltage = rts5228_switch_output_voltage,
+ .force_power_down = rts5228_force_power_down,
+ .stop_cmd = rts5228_stop_cmd,
+ .set_aspm = rts5228_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5228_enable_ocp,
+ .disable_ocp = rts5228_disable_ocp,
+ .init_ocp = rts5228_init_ocp,
+ .process_ocp = rts5228_process_ocp,
+ .clear_ocpstat = rts5228_clear_ocpstat,
+ .optimize_phy = rts5228_optimize_phy,
+};
+
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* We use 250k(around) here, in initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = clk - 4;
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = 125/clk + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2) - 1;
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait SSC clock stable */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+
+}
+
+void rts5228_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5228_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5228_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+}
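
The divider search in rts5228_pci_switch_clock() earlier in this file is compact but easy to misread. The standalone sketch below is not part of the patch: it models only the path taken when no conv_clk_and_div_n callback is installed, and MIN_DIV_N and MAX_DOUBLINGS are assumed stand-ins for MIN_DIV_N_PCR and the CLK_DIV_1..CLK_DIV_8 register codes defined elsewhere in the driver.

/*
 * Illustrative userspace sketch of the SSC divider search in
 * rts5228_pci_switch_clock(); not driver code.  Clocks are in MHz.
 */
#include <stdio.h>

#define MIN_DIV_N	80	/* assumed value of MIN_DIV_N_PCR */
#define MAX_DOUBLINGS	3	/* CLK_DIV_1..CLK_DIV_8 allows three doublings */

static int compute_ssc_div_n(unsigned int clk_mhz, unsigned int *div_n,
			     unsigned int *doublings)
{
	unsigned int n, d = 0;

	if (clk_mhz <= 4)
		return -1;
	n = clk_mhz - 4;		/* DIV_N encoding: clk = N + 4 */
	if (n > 396)
		return -1;

	/* N too small: double the SSC clock and divide the card clock instead */
	while (n < MIN_DIV_N - 4 && d < MAX_DOUBLINGS) {
		n = (n + 4) * 2 - 4;
		d++;
	}

	*div_n = n / 2 - 1;		/* value programmed into SSC_DIV_N_0 */
	*doublings = d;
	return 0;
}

int main(void)
{
	unsigned int n, d;

	if (!compute_ssc_div_n(50, &n, &d))	/* e.g. a 50 MHz SDR50 card clock */
		printf("SSC_DIV_N_0=%u, card clock divided by %u\n", n, 1u << d);
	return 0;
}

With the assumed MIN_DIV_N of 80, a 50 MHz request lands on one doubling and SSC_DIV_N_0 = 47, mirroring the arithmetic the function above performs before issuing the CLK_DIV and SSC register writes.
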
diff --git a/drivers/misc/cardreader/rts5228.h b/drivers/misc/cardreader/rts5228.h
new file mode 100644
index 000000000000..6a872246aeed
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5228_H
+#define RTS5228_H
+
+
+#define RTS5228_AUTOLOAD_CFG0 0xFF7B
+#define RTS5228_AUTOLOAD_CFG1 0xFF7C
+#define RTS5228_AUTOLOAD_CFG2 0xFF7D
+#define RTS5228_AUTOLOAD_CFG3 0xFF7E
+#define RTS5228_AUTOLOAD_CFG4 0xFF7F
+
+#define RTS5228_REG_VREF 0xFE97
+#define RTS5228_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5228_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5228_SSC_DEPTH_MASK 0x07
+#define RTS5228_SSC_DEPTH_DISALBE 0x00
+#define RTS5228_SSC_DEPTH_8M 0x01
+#define RTS5228_SSC_DEPTH_4M 0x02
+#define RTS5228_SSC_DEPTH_2M 0x03
+#define RTS5228_SSC_DEPTH_1M 0x04
+#define RTS5228_SSC_DEPTH_512K 0x05
+#define RTS5228_SSC_DEPTH_256K 0x06
+#define RTS5228_SSC_DEPTH_128K 0x07
+
+/* DMACTL 0xFE2C */
+#define RTS5228_DMA_PACK_SIZE_MASK 0xF0
+
+#define RTS5228_REG_LDO12_CFG 0xFF6E
+#define RTS5228_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO12_100 (0x00<<1)
+#define RTS5228_LDO12_105 (0x01<<1)
+#define RTS5228_LDO12_110 (0x02<<1)
+#define RTS5228_LDO12_115 (0x03<<1)
+#define RTS5228_LDO12_120 (0x04<<1)
+#define RTS5228_LDO12_125 (0x05<<1)
+#define RTS5228_LDO12_130 (0x06<<1)
+#define RTS5228_LDO12_135 (0x07<<1)
+#define RTS5228_REG_PWD_LDO12 (0x01<<0)
+
+#define RTS5228_REG_LDO12_L12 0xFF6F
+#define RTS5228_LDO12_L12_MASK (0x07<<4)
+#define RTS5228_LDO12_L12_120 (0x04<<4)
+
+/* LDO control register */
+#define RTS5228_CARD_PWR_CTL 0xFD50
+#define RTS5228_PUPDC (0x01<<5)
+
+#define RTS5228_LDO1233318_POW_CTL 0xFF70
+#define RTS5228_LDO3318_POWERON (0x01<<3)
+#define RTS5228_LDO1_POWEROFF (0x00<<0)
+#define RTS5228_LDO1_SOFTSTART (0x01<<0)
+#define RTS5228_LDO1_FULLON (0x03<<0)
+#define RTS5228_LDO1_POWERON_MASK (0x03<<0)
+#define RTS5228_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5228_DV3318_CFG 0xFF71
+#define RTS5228_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5228_DV3318_17 (0x00<<4)
+#define RTS5228_DV3318_1V75 (0x01<<4)
+#define RTS5228_DV3318_18 (0x02<<4)
+#define RTS5228_DV3318_1V85 (0x03<<4)
+#define RTS5228_DV3318_19 (0x04<<4)
+#define RTS5228_DV3318_33 (0x07<<4)
+#define RTS5228_DV3318_SR_MASK (0x03<<2)
+#define RTS5228_DV3318_SR_0 (0x00<<2)
+#define RTS5228_DV3318_SR_250 (0x01<<2)
+#define RTS5228_DV3318_SR_500 (0x02<<2)
+#define RTS5228_DV3318_SR_1000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG0 0xFF72
+#define RTS5228_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5228_LDO1_OCP_EN (0x01<<4)
+#define RTS5228_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5228_LDO1_OCP_LMT_EN (0x01<<1)
+
+#define RTS5228_LDO1_OCP_THD_730 (0x00<<5)
+#define RTS5228_LDO1_OCP_THD_780 (0x01<<5)
+#define RTS5228_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5228_LDO1_OCP_THD_930 (0x03<<5)
+#define RTS5228_LDO1_OCP_THD_1000 (0x04<<5)
+#define RTS5228_LDO1_OCP_THD_1070 (0x05<<5)
+#define RTS5228_LDO1_OCP_THD_1140 (0x06<<5)
+#define RTS5228_LDO1_OCP_THD_1220 (0x07<<5)
+
+#define RTS5228_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5228_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5228_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5228_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG1 0xFF73
+#define RTS5228_LDO1_SR_TIME_MASK (0x03<<6)
+#define RTS5228_LDO1_SR_0_0 (0x00<<6)
+#define RTS5228_LDO1_SR_0_25 (0x01<<6)
+#define RTS5228_LDO1_SR_0_5 (0x02<<6)
+#define RTS5228_LDO1_SR_1_0 (0x03<<6)
+#define RTS5228_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO1_18 (0x05<<1)
+#define RTS5228_LDO1_33 (0x07<<1)
+#define RTS5228_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5228_AUXCLK_GAT_CTL 0xFF74
+
+#define RTS5228_REG_RREF_CTL_0 0xFF75
+#define RTS5228_FORCE_RREF_EXTL (0x01<<7)
+#define RTS5228_REG_BG33_MASK (0x07<<0)
+#define RTS5228_RREF_12_1V (0x04<<0)
+#define RTS5228_RREF_12_3V (0x05<<0)
+
+#define RTS5228_REG_RREF_CTL_1 0xFF76
+
+#define RTS5228_REG_RREF_CTL_2 0xFF77
+#define RTS5228_TEST_INTL_RREF (0x01<<7)
+#define RTS5228_DGLCH_TIME_MASK (0x03<<5)
+#define RTS5228_DGLCH_TIME_50 (0x00<<5)
+#define RTS5228_DGLCH_TIME_75 (0x01<<5)
+#define RTS5228_DGLCH_TIME_100 (0x02<<5)
+#define RTS5228_DGLCH_TIME_125 (0x03<<5)
+#define RTS5228_REG_REXT_TUNE_MASK (0x1F<<0)
+
+#define RTS5228_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+
+
+/* Single LUN, support SD */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5228_H */
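
The constants in rts5228.h follow a common mask/value pattern: each multi-bit field has a *_MASK covering its bit positions plus a set of pre-shifted values, and rts5228_init_ocp() above programs option->sd_800mA_ocp_thd under RTS5228_LDO1_OCP_THD_MASK. The snippet below is a userspace sketch, not driver code; update_field() is an illustrative helper that mirrors the mask/value semantics of rtsx_pci_write_register().

#include <stdint.h>
#include <stdio.h>

#define LDO1_OCP_THD_MASK	(0x07 << 5)	/* bits 7:5, as in rts5228.h */
#define LDO1_OCP_THD_930	(0x03 << 5)

/* Clear the masked field, then set it to the pre-shifted value. */
static uint8_t update_field(uint8_t cur, uint8_t mask, uint8_t val)
{
	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t ldo1_cfg0 = 0x12;		/* arbitrary starting register value */

	ldo1_cfg0 = update_field(ldo1_cfg0, LDO1_OCP_THD_MASK, LDO1_OCP_THD_930);
	printf("LDO1_CFG0 = 0x%02X\n", ldo1_cfg0);	/* threshold field now 0b011 */
	return 0;
}
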
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9f080a32ef50..89e6f124ca5c 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -23,9 +23,10 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -37,7 +38,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 6c6c9e95a29f..941b3d77f1e9 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -55,9 +55,10 @@ static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -70,7 +71,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -93,32 +94,33 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
u32 lval;
- if (CHK_PCI_PID(pcr, PID_524A))
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG1, &lval);
- else
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG2, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 7a9dbb778e84..b9f66b1384a6 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,9 +64,10 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -79,7 +80,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -496,21 +497,27 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &pcr->option;
u32 lval;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
@@ -518,7 +525,7 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 195822ec858e..471961487ff8 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -59,9 +59,11 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
+
/* 0x814~0x817 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (!rts5261_vendor_setting_valid(reg)) {
@@ -76,7 +78,7 @@ static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->flags |= PCR_REVERSE_SOCKET;
/* 0x724~0x727 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
pcr->aspm_en = rts5261_reg_to_aspm(reg);
@@ -361,6 +363,7 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int retval;
u32 lval, i;
u8 valid, efuse_valid, tmp;
@@ -386,8 +389,7 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
if (efuse_valid == 0) {
- retval = rtsx_pci_read_config_dword(pcr,
- PCR_SETTING_REG2, &lval);
+ retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
if (retval != 0)
pcr_dbg(pcr, "read 0x814 DW fail\n");
pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
@@ -399,9 +401,9 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
lval = lval & 0x00FFFFFF;
- retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
if (retval != 0)
pcr_dbg(pcr, "write config fail\n");
@@ -410,27 +412,33 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
@@ -439,7 +447,7 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 0d5928bc1b6d..37ccc67f4914 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -23,6 +23,7 @@
#include "rtsx_pcr.h"
#include "rts5261.h"
+#include "rts5228.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -50,6 +51,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -206,16 +208,10 @@ int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
int err, i, finished = 0;
u8 tmp;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
+ rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -247,16 +243,10 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
int err, i, finished = 0;
u16 data;
- u8 *ptr, tmp;
+ u8 tmp, val1, val2;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -272,17 +262,9 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
if (!finished)
return -ETIMEDOUT;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
-
- ptr = rtsx_pci_get_cmd_data(pcr);
- data = ((u16)ptr[1] << 8) | ptr[0];
+ rtsx_pci_read_register(pcr, PHYDATA0, &val1);
+ rtsx_pci_read_register(pcr, PHYDATA1, &val2);
+ data = val1 | (val2 << 8);
if (val)
*val = data;
@@ -417,7 +399,7 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
if (len > 0xFFFF)
val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
| (((u64)len >> 16) << 6) | option;
@@ -723,6 +705,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (PCI_PID(pcr) == PID_5261)
return rts5261_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
+ if (PCI_PID(pcr) == PID_5228)
+ return rts5228_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
@@ -1111,8 +1096,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
}
-#ifdef CONFIG_PM
-static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
@@ -1126,7 +1110,6 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
}
-#endif
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
@@ -1202,6 +1185,36 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
}
}
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val |= 1<<9;
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
+
+}
+
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val &= ~(1<<9);
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
+
+}
+
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
@@ -1231,9 +1244,13 @@ int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int err;
- pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
+ if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
+ RTS5228_LDO1_SR_0_5);
+
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
rtsx_pci_enable_bus_int(pcr);
@@ -1280,6 +1297,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (PCI_PID(pcr) == PID_5261)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5261_SSC_DEPTH_2M);
+ else if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5228_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
@@ -1314,6 +1334,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_525A:
case PID_5260:
case PID_5261:
+ case PID_5228:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1324,9 +1345,10 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_init_ocp(pcr);
/* Enable clk_request_n to enable clock power management */
- rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
/* Enter L1 when host tx idle */
- rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
+ pci_write_config_byte(pdev, 0x70F, 0x5B);
if (pcr->ops->extra_init_hw) {
err = pcr->ops->extra_init_hw(pcr);
@@ -1401,6 +1423,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5261:
rts5261_init_params(pcr);
break;
+
+ case 0x5228:
+ rts5228_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1604,10 +1630,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
-#ifdef CONFIG_PM
-
-static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
+static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
@@ -1623,17 +1648,15 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
rtsx_pci_power_off(pcr, HOST_ENTER_S3);
- pci_save_state(pcidev);
- pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
- pci_disable_device(pcidev);
- pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+ device_wakeup_disable(dev_d);
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
-static int rtsx_pci_resume(struct pci_dev *pcidev)
+static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
int ret = 0;
@@ -1645,13 +1668,6 @@ static int rtsx_pci_resume(struct pci_dev *pcidev)
mutex_lock(&pcr->pcr_mutex);
- pci_set_power_state(pcidev, PCI_D0);
- pci_restore_state(pcidev);
- ret = pci_enable_device(pcidev);
- if (ret)
- goto out;
- pci_set_master(pcidev);
-
ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
if (ret)
goto out;
@@ -1667,6 +1683,8 @@ out:
return ret;
}
+#ifdef CONFIG_PM
+
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
struct pcr_handle *handle;
@@ -1686,19 +1704,18 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
#else /* CONFIG_PM */
-#define rtsx_pci_suspend NULL
-#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL
#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
+
static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,
.id_table = rtsx_pci_ids,
.probe = rtsx_pci_probe,
.remove = rtsx_pci_remove,
- .suspend = rtsx_pci_suspend,
- .resume = rtsx_pci_resume,
+ .driver.pm = &rtsx_pci_pm_ops,
.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);
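For reference, the rtsx hunks above follow the common conversion from legacy .suspend/.resume PCI hooks to a dev_pm_ops table: the callbacks become __maybe_unused device-level handlers and the PCI core takes over config-space save/restore and power-state transitions. A minimal sketch of that pattern, with hypothetical foo_* names rather than this driver's code:

#include <linux/pci.h>
#include <linux/pm.h>

/* Sketch of the dev_pm_ops conversion; foo_* names are hypothetical. */
static int __maybe_unused foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Quiesce the hardware; the PCI core saves config space and
	 * picks the low-power state, so no pci_save_state() here.
	 */
	foo_stop_hw(pci_get_drvdata(pdev));
	device_wakeup_disable(dev);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Config space was already restored by the PCI core. */
	return foo_start_hw(pci_get_drvdata(pdev));
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,		/* hypothetical ID table */
	.probe		= foo_probe,
	.remove		= foo_remove,
	.driver.pm	= &foo_pm_ops,
};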
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 024cbd998b2a..6b322db8738e 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
void rts5261_init_params(struct rtsx_pcr *pcr);
+void rts5228_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -70,6 +71,8 @@ static inline u8 map_sd_drive(int idx)
#define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80))
#define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80)
+#define rtsx_check_mmc_support(reg) ((reg) & 0x10)
+#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02)
#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
@@ -100,6 +103,8 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr);
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index a328cab11014..59eda55d92a3 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -759,7 +759,7 @@ static int rtsx_usb_post_reset(struct usb_interface *intf)
return 0;
}
-static struct usb_device_id rtsx_usb_usb_ids[] = {
+static const struct usb_device_id rtsx_usb_usb_ids[] = {
{ USB_DEVICE(0x0BDA, 0x0129) },
{ USB_DEVICE(0x0BDA, 0x0139) },
{ USB_DEVICE(0x0BDA, 0x0140) },
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index b290bc2ee240..55b7ee0e8f93 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -166,37 +166,24 @@ void cb710_set_irq_handler(struct cb710_slot *slot,
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
-#ifdef CONFIG_PM
-
-static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cb710_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
devm_free_irq(&pdev->dev, pdev->irq, chip);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- if (state.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int cb710_resume(struct pci_dev *pdev)
+static int __maybe_unused cb710_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pcim_enable_device(pdev);
- if (err)
- return err;
return devm_request_irq(&pdev->dev, pdev->irq,
cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}
-#endif /* CONFIG_PM */
-
static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -312,15 +299,14 @@ static const struct pci_device_id cb710_pci_tbl[] = {
{ 0, }
};
+static SIMPLE_DEV_PM_OPS(cb710_pm_ops, cb710_suspend, cb710_resume);
+
static struct pci_driver cb710_driver = {
.name = KBUILD_MODNAME,
.id_table = cb710_pci_tbl,
.probe = cb710_probe,
.remove = cb710_remove_one,
-#ifdef CONFIG_PM
- .suspend = cb710_suspend,
- .resume = cb710_resume,
-#endif
+ .driver.pm = &cb710_pm_ops,
};
static int __init cb710_init_module(void)
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index dfd2969e3628..e5a4ed3701eb 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -117,6 +117,7 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
/**
* cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
* @miter: sg mapping iterator used for writing
+ * @data: data to write to sg buffer
*
* Description:
* Writes 32-bit word starting at byte pointed to by @miter@
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index cb9cca35a226..5b93ff51d82a 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -175,7 +175,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
struct update_nodes_workarea *unwa;
u32 action, node_count;
int token, rc, i;
- __be32 *data, drc_index, phandle;
+ __be32 *data, phandle;
char *buf;
token = rtas_token("ibm,update-nodes");
@@ -213,7 +213,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
break;
case OPCODE_ADD:
/* nothing to do, just move pointer */
- drc_index = *data++;
+ data++;
break;
}
}
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
index b7c57f67f4f5..aba5e20eeb1f 100644
--- a/drivers/misc/cxl/hcalls.c
+++ b/drivers/misc/cxl/hcalls.c
@@ -167,7 +167,7 @@ long cxl_h_attach_process(u64 unit_address,
}
}
-/**
+/*
* cxl_h_detach_process - Detach a process element from a coherent
* platform function.
*/
@@ -197,7 +197,7 @@ long cxl_h_detach_process(u64 unit_address, u64 process_token)
}
}
-/**
+/*
* cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
* the partition to manipulate or query
* certain coherent platform function behaviors.
@@ -238,7 +238,7 @@ static long cxl_h_control_function(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_afu - Perform a reset to the coherent platform function.
*/
long cxl_h_reset_afu(u64 unit_address)
@@ -249,7 +249,7 @@ long cxl_h_reset_afu(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_suspend_process - Suspend a process from being executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -262,7 +262,7 @@ long cxl_h_suspend_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_resume_process - Resume a process to be executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -275,7 +275,7 @@ long cxl_h_resume_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_read_error_state - Checks the error state of the coherent
* platform function.
* R4 contains the error state
@@ -288,7 +288,7 @@ long cxl_h_read_error_state(u64 unit_address, u64 *state)
state);
}
-/**
+/*
* cxl_h_get_afu_err - collect the AFU error buffer
* Parameter1 = byte offset into error buffer to retrieve, valid values
* are between 0 and (ibm,error-buffer-size - 1)
@@ -304,7 +304,7 @@ long cxl_h_get_afu_err(u64 unit_address, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_get_config - collect configuration record for the
* coherent platform function
* Parameter1 = # of configuration record to retrieve, valid values are
@@ -324,7 +324,7 @@ long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_terminate_process - Terminate the process before completion
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -337,7 +337,7 @@ long cxl_h_terminate_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = # of VPD record to retrieve, valid values are between 0
* and (ibm,#config-records - 1).
@@ -355,7 +355,7 @@ long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
out);
}
-/**
+/*
* cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
*/
long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
@@ -365,7 +365,7 @@ long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
0, 0, 0, 0, reg);
}
-/**
+/*
* cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
* based on an interrupt
* Parameter1 = value to write to the function-wide error interrupt register
@@ -378,7 +378,7 @@ long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
* an error log
*/
@@ -390,7 +390,7 @@ long cxl_h_get_error_log(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_collect_int_info - Collect interrupt info about a coherent
* platform function after an interrupt occurred.
*/
@@ -425,7 +425,7 @@ long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_faults - Control the operation of a coherent platform
* function after a fault occurs.
*
@@ -470,7 +470,7 @@ long cxl_h_control_faults(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
* allows the partition to manipulate or query
* certain coherent platform facility behaviors.
@@ -509,7 +509,7 @@ static long cxl_h_control_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
*/
long cxl_h_reset_adapter(u64 unit_address)
@@ -520,7 +520,7 @@ long cxl_h_reset_adapter(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = 4K naturally aligned real buffer containing block
* list entries
@@ -536,7 +536,7 @@ long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
out);
}
-/**
+/*
* cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
* hypervisor call provide platform support for
* downloading a base adapter image to the coherent
@@ -616,7 +616,7 @@ static long cxl_h_download_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_download_adapter_image - Download the base image to the coherent
* platform facility.
*/
@@ -629,7 +629,7 @@ long cxl_h_download_adapter_image(u64 unit_address,
list_address, num, out);
}
-/**
+/*
* cxl_h_validate_adapter_image - Validate the base image in the coherent
* platform facility.
*/
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index f0263d1a1fdf..d97a243ad30c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
if (rc)
- goto err;
+ goto err1;
rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
if (rc)
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 1cf320e2a415..1264253cc07b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -150,7 +150,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
@@ -184,7 +184,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_SET_FAILED : 0;
}
static struct pci_ops cxl_pcie_pci_ops =
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 713e92ee27ac..3c4eaba86576 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -66,13 +66,13 @@
Path Models", IEEE Transactions on communications, COM-25,
No. 6, June
1977.
- http://www.rowetel.com/images/echo/dual_path_paper.pdf
+ https://www.rowetel.com/images/echo/dual_path_paper.pdf
[2] The classic, very useful paper that tells you how to
actually build a real world echo canceller:
Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
Echo Canceller with a TMS320020,
- http://www.rowetel.com/images/echo/spra129.pdf
+ https://www.rowetel.com/images/echo/spra129.pdf
[3] I have written a series of blog posts on this work, here is
Part 1: http://www.rowetel.com/blog/?p=18
@@ -80,7 +80,7 @@
[4] The source code http://svn.rowetel.com/software/oslec/
[5] A nice reference on LMS filters:
- http://en.wikipedia.org/wiki/Least_mean_squares_filter
+ https://en.wikipedia.org/wiki/Least_mean_squares_filter
Credits:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 9ff18d4961ce..2591c21b2b5d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -225,7 +225,7 @@ static const struct of_device_id at24_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at24_of_match);
-static const struct acpi_device_id at24_acpi_ids[] = {
+static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
{ "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 36a2eb837371..9627294fe3e9 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
/**
* eeprom_93cx6_readb - Read a byte from eeprom
* @eeprom: Pointer to eeprom structure
- * @word: Byte index from where we should start reading
+ * @byte: Byte index from where we should start reading
* @data: target pointer where the information will have to be stored
*
* This function will read a byte of the eeprom data
@@ -270,7 +270,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
* @eeprom: Pointer to eeprom structure
* @byte: Index from where we should start reading
* @data: target pointer where the information will have to be stored
- * @words: Number of bytes that should be read.
+ * @bytes: Number of bytes that should be read.
*
* This function will read all requested bytes from the eeprom,
* this is done by calling eeprom_93cx6_readb() multiple times.
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3c2d405bc79b..f950d0155876 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -103,7 +103,9 @@ EXPORT_SYMBOL_GPL(enclosure_for_each_device);
* enclosure_register - register device as an enclosure
*
* @dev: device containing the enclosure
+ * @name: chosen device name
* @components: number of components in the enclosure
+ * @cb: platform call-backs
*
* This sets up the device for being an enclosure. Note that @dev does
* not have to be a dedicated enclosure device. It may be some other type
@@ -266,7 +268,7 @@ static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_alloc - prepare a new enclosure component
* @edev: the enclosure to add the component
- * @num: the device number
+ * @number: the device number
* @type: the type of component being added
* @name: an optional name to appear in sysfs (leave NULL if none)
*
@@ -347,7 +349,7 @@ EXPORT_SYMBOL_GPL(enclosure_component_register);
/**
* enclosure_add_device - add a device as being part of an enclosure
* @edev: the enclosure device being added to.
- * @num: the number of the component
+ * @component: the number of the component
* @dev: the device being added
*
* Declares a real device to reside in slot (or identifier) @num of an
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
/**
* enclosure_remove_device - remove a device from an enclosure
* @edev: the enclosure device
- * @num: the number of the component to remove
+ * @dev: device to remove/put
*
* Returns zero on success or an error.
*
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 1dc6c7c5cbce..acc459fc8105 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -165,6 +165,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
/**
* genwqe_bus_reset() - Card recovery
+ * @cd: GenWQE device information
*
* pci_reset_function() will recover the device and ensure that the
* registers are accessible again when it completes with success. If
@@ -262,6 +263,7 @@ static void genwqe_tweak_hardware(struct genwqe_dev *cd)
/**
 * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
+ * @cd: GenWQE device information
*
* Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
* be ignored. This is e.g. true for the bitstream we gave to the card
@@ -280,6 +282,7 @@ int genwqe_flash_readback_fails(struct genwqe_dev *cd)
/**
* genwqe_T_psec() - Calculate PF/VF timeout register content
+ * @cd: GenWQE device information
*
* Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
@@ -303,6 +306,7 @@ static int genwqe_T_psec(struct genwqe_dev *cd)
/**
* genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*
* Do this _after_ card_reset() is called. Otherwise the values will
* vanish. The settings need to be done when the queues are inactive.
@@ -329,6 +333,7 @@ static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
/**
* genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*/
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
@@ -543,6 +548,7 @@ static int genwqe_start(struct genwqe_dev *cd)
/**
* genwqe_stop() - Stop card operation
+ * @cd: GenWQE device information
*
* Recovery notes:
* As long as genwqe_thread runs we might access registers during
@@ -569,6 +575,8 @@ static int genwqe_stop(struct genwqe_dev *cd)
/**
* genwqe_recover_card() - Try to recover the card if it is possible
+ * @cd: GenWQE device information
+ * @fatal_err: Indicate whether to attempt soft reset
*
* If fatal_err is set no register access is possible anymore. It is
* likely that genwqe_start fails in that situation. Proper error
@@ -618,6 +626,7 @@ static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
/**
* genwqe_fir_checking() - Check the fault isolation registers of the card
+ * @cd: GenWQE device information
*
* If this code works ok, can be tried out with help of the genwqe_poke tool:
* sudo ./tools/genwqe_poke 0x8 0xfefefefefef
@@ -762,6 +771,7 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd)
/**
* genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
+ * @pci_dev: PCI device information struct
*
* Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
* reset method will not work in all cases.
@@ -826,8 +836,9 @@ static int genwqe_platform_recovery(struct genwqe_dev *cd)
return rc;
}
-/*
+/**
* genwqe_reload_bistream() - reload card bitstream
+ * @cd: GenWQE device information
*
 * Set the appropriate register and call fundamental reset to reload the card
* bitstream.
@@ -880,6 +891,7 @@ static int genwqe_reload_bistream(struct genwqe_dev *cd)
/**
* genwqe_health_thread() - Health checking thread
+ * @data: GenWQE device information
*
* This thread is only started for the PF of the card.
*
@@ -1043,18 +1055,17 @@ static int genwqe_health_thread_running(struct genwqe_dev *cd)
static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
- int rc;
-
if (!genwqe_health_thread_running(cd))
return -EIO;
- rc = kthread_stop(cd->health_thread);
+ kthread_stop(cd->health_thread);
cd->health_thread = NULL;
return 0;
}
/**
* genwqe_pci_setup() - Allocate PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
@@ -1140,6 +1151,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/**
* genwqe_pci_remove() - Free PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
@@ -1154,7 +1166,8 @@ static void genwqe_pci_remove(struct genwqe_dev *cd)
/**
* genwqe_probe() - Device initialization
- * @pdev: PCI device information struct
+ * @pci_dev: PCI device information struct
+ * @id: PCI device ID
*
* Callable for multiple cards. This function is called on bind.
*
@@ -1214,6 +1227,7 @@ static int genwqe_probe(struct pci_dev *pci_dev,
/**
* genwqe_remove() - Called when device is removed (hot-plugable)
+ * @pci_dev: PCI device information struct
*
 * Or when the driver is unloaded, respectively when unbind is done.
*/
@@ -1233,8 +1247,10 @@ static void genwqe_remove(struct pci_dev *pci_dev)
genwqe_dev_free(cd);
}
-/*
+/**
* genwqe_err_error_detected() - Error detection callback
+ * @pci_dev: PCI device information struct
+ * @state: PCI channel state
*
* This callback is called by the PCI subsystem whenever a PCI bus
* error is detected.
@@ -1324,7 +1340,7 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
return 0;
}
-static struct pci_error_handlers genwqe_err_handler = {
+static const struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
@@ -1342,6 +1358,8 @@ static struct pci_driver genwqe_driver = {
/**
* genwqe_devnode() - Set default access mode for genwqe devices.
+ * @dev: Pointer to device (unused)
+ * @mode: Carrier to pass-back given mode (permissions)
*
* Default mode should be rw for everybody. Do not change default
* device name.
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 905106579935..0db4000dedf2 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -244,10 +244,13 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
(cd->card_state != GENWQE_CARD_USED);
}
+#define RET_DDCB_APPENDED 1
+#define RET_DDCB_TAPPED 2
/**
* enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @queue: queue this operation should be done on
+ * @pddcb: pointer to ddcb structure
* @ddcb_no: pointer to ddcb number being tapped
*
* Start execution of DDCB by tapping or append to queue via NEXT
@@ -259,9 +262,6 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
* Return: 1 if new DDCB is appended to previous
* 2 if DDCB queue is tapped via register/simulation
*/
-#define RET_DDCB_APPENDED 1
-#define RET_DDCB_TAPPED 2
-
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
struct ddcb *pddcb, int ddcb_no)
{
@@ -316,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
+ * @req: pointer to requested DDCB parameters
+ * @ddcb_no: pointer to ddcb number being tapped
*
* Copy DDCB ASV to request struct. There is no endian
* conversion made, since data structure in ASV is still
@@ -356,6 +358,7 @@ static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
/**
 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
* @cd: pointer to genwqe device descriptor
+ * @queue: queue to be checked
*
* Return: Number of DDCBs which were finished
*/
@@ -553,6 +556,8 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
/**
* get_next_ddcb() - Get next available DDCB
* @cd: pointer to genwqe device descriptor
+ * @queue: DDCB queue
+ * @num: internal DDCB number
*
* DDCB's content is completely cleared but presets for PRE and
* SEQNUM. This function must only be called when ddcb_lock is held.
@@ -900,7 +905,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
/**
* __genwqe_execute_raw_ddcb() - Setup and execute DDCB
* @cd: pointer to genwqe device descriptor
- * @req: user provided DDCB request
+ * @cmd: user provided DDCB command
* @f_flags: file mode: blocking, non-blocking
*/
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
@@ -965,6 +970,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
/**
* genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ * @cd: pointer to genwqe device descriptor
*
* We use this as condition for our wait-queue code.
*/
@@ -993,6 +999,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
/**
* genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ * @cd: pointer to genwqe device descriptor
*
 * Keep track of the number of DDCBs which are currently in the
 * queue. This is needed for statistics as well as a condition if we want
@@ -1171,6 +1178,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
/**
* genwqe_card_thread() - Work thread for the DDCB queue
+ * @data: pointer to genwqe device descriptor
*
* The idea is to check if there are DDCBs in processing. If there are
* some finished DDCBs, we process them and wakeup the
@@ -1299,6 +1307,7 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd)
/**
* queue_wake_up_all() - Handles fatal error case
+ * @cd: pointer to genwqe device descriptor
*
* The PCI device got unusable and we have to stop all pending
* requests as fast as we can. The code after this must purge the
@@ -1323,6 +1332,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
/**
* genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
+ * @cd: pointer to genwqe device descriptor
*
* Relies on the pre-condition that there are no users of the card
* device anymore e.g. with open file-descriptors.
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 1b5b82e65268..491fb4482da2 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 040a0bda3125..55fc5b80e649 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -87,7 +87,7 @@ static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
* @cfile: Descriptor of opened file
* @u_addr: User virtual address
* @size: Size of buffer
- * @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
*
* Return: Pointer to the corresponding mapping NULL if not found
*/
@@ -144,6 +144,7 @@ static void __genwqe_del_mapping(struct genwqe_file *cfile,
* @u_addr: user virtual address
* @size: size of buffer
* @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
* Return: Pointer to the corresponding mapping NULL if not found
*/
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
@@ -249,6 +250,8 @@ static void genwqe_remove_pinnings(struct genwqe_file *cfile)
/**
* genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ * @cd: GenWQE device information
+ * @sig: Signal to send out
*
* E.g. genwqe_send_signal(cd, SIGIO);
*/
@@ -380,6 +383,7 @@ static void genwqe_vma_open(struct vm_area_struct *vma)
/**
* genwqe_vma_close() - Called each time when vma is unmapped
+ * @vma: VMA area to close
*
* Free memory which got allocated by GenWQE mmap().
*/
@@ -416,6 +420,8 @@ static const struct vm_operations_struct genwqe_vma_ops = {
/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
+ * @filp: File pointer (unused)
+ * @vma: VMA area to map
*
 * We use mmap() to allocate contiguous buffers used for DMA
* transfers. After the buffer is allocated we remap it to user-space
@@ -484,16 +490,15 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
}
+#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
+
/**
 * do_flash_update() - Execute flash update (write image or CVPD)
- * @cd: genwqe device
+ * @cfile: Descriptor of opened file
* @load: details about image load
*
* Return: 0 if successful
*/
-
-#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
-
static int do_flash_update(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
@@ -820,6 +825,8 @@ static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
/**
* ddcb_cmd_cleanup() - Remove dynamically created fixup entries
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Only if there are any. Pinnings are not removed.
*/
@@ -844,6 +851,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Before the DDCB gets executed we need to handle the fixups. We
* replace the user-space addresses with DMA addresses or do
@@ -974,6 +983,8 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ * @cfile: Descriptor of opened file
+ * @cmd: Command identifier (passed from user)
*
* The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
@@ -1339,6 +1350,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
/**
* genwqe_device_remove() - Remove genwqe's char device
+ * @cd: GenWQE device information
*
* This function must be called after the client devices are removed
* because it will free the major/minor number range for the genwqe
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 28a3fb1533f7..b2f115602523 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,7 +129,7 @@ static ssize_t base_clock_show(struct device *dev,
}
static DEVICE_ATTR_RO(base_clock);
-/**
+/*
* curr_bitstream_show() - Show the current bitstream id
*
* There is a bug in some old versions of the CPLD which selects the
@@ -156,7 +156,7 @@ static ssize_t curr_bitstream_show(struct device *dev,
}
static DEVICE_ATTR_RO(curr_bitstream);
-/**
+/*
* next_bitstream_show() - Show the next activated bitstream
*
* IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
@@ -260,7 +260,7 @@ static struct attribute *genwqe_normal_attributes[] = {
NULL,
};
-/**
+/*
* genwqe_is_visible() - Determine if sysfs attribute should be visible or not
*
* VFs have restricted mmio capabilities, so not all sysfs entries
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 77c21caf2acd..039b923d1d60 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,6 +129,9 @@ u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
/**
* genwqe_read_app_id() - Extract app_id
+ * @cd: genwqe device descriptor
+ * @app_name: carrier used to pass-back name
+ * @len: length of data for name
*
* app_unitcfg need to be filled with valid data first
*/
@@ -183,7 +186,7 @@ void genwqe_init_crc32(void)
* @init: initial crc (0xffffffff at start)
*
* polynomial = x^32 * + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
-
+ *
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
* result in a crc32 of 0xf33cb7d3.
*
@@ -277,7 +280,7 @@ static int genwqe_sgl_size(int num_pages)
return roundup(len, PAGE_SIZE);
}
-/**
+/*
* genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
*
* Allocates memory for sgl and overlapping pages. Pages which might
@@ -460,6 +463,8 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/**
* genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ * @cd: genwqe device descriptor
+ * @sgl: scatter gather list describing user-space memory
*
* After the DMA transfer has been completed we free the memory for
* the sgl and the cached pages. Data is being transferred from cached
@@ -710,6 +715,7 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
/**
* genwqe_set_interrupt_capability() - Configure MSI capability structure
* @cd: pointer to the device
+ * @count: number of vectors to allocate
* Return: 0 if no error
*/
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
@@ -738,7 +744,7 @@ void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
* @i: index to desired entry
* @m: maximum possible entries
* @addr: addr which is read
- * @index: index in debug array
+ * @idx: index in debug array
* @val: read value
*/
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
@@ -818,6 +824,8 @@ int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
/**
* genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
*/
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
@@ -871,6 +879,10 @@ int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
/**
* genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ * @regs: register information
+ * @max_regs: number of register entries
*/
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
struct genwqe_reg *regs, unsigned int max_regs)
@@ -956,6 +968,10 @@ int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
/**
* genwqe_write_vreg() - Write register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @val: value to write
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -969,6 +985,9 @@ int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
/**
* genwqe_read_vreg() - Read register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -981,6 +1000,7 @@ u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ * @cd: genwqe device descriptor
*
* Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
@@ -1005,6 +1025,7 @@ int genwqe_base_clock_frequency(struct genwqe_dev *cd)
/**
* genwqe_stop_traps() - Stop traps
+ * @cd: genwqe device descriptor
*
* Before reading out the analysis data, we need to stop the traps.
*/
@@ -1015,6 +1036,7 @@ void genwqe_stop_traps(struct genwqe_dev *cd)
/**
* genwqe_start_traps() - Start traps
+ * @cd: genwqe device descriptor
*
* After having read the data, we can/must enable the traps again.
*/
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 421ebd903069..a786c0a7de9a 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -3,16 +3,15 @@
# Makefile for HabanaLabs AI accelerators driver
#
-obj-m := habanalabs.o
+obj-$(CONFIG_HABANA_AI) := habanalabs.o
-habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
- command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
- command_submission.o mmu.o firmware_if.o pci.o
-
-habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
+include $(src)/common/Makefile
+habanalabs-y += $(HL_COMMON_FILES)
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
+
+habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
new file mode 100644
index 000000000000..b984bfa4face
--- /dev/null
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
+ common/asid.o common/habanalabs_ioctl.o \
+ common/command_buffer.o common/hw_queue.o common/irq.o \
+ common/sysfs.o common/hwmon.o common/memory.o \
+ common/command_submission.o common/mmu.o common/firmware_if.o \
+ common/pci.o
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/common/asid.c
index a2fdf31cf27c..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 02d13f71b1df..7c38c4f7f9c0 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -10,12 +10,18 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/genalloc.h>
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
- hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- (void *) (uintptr_t) cb->kernel_address,
- cb->bus_address);
+ if (cb->is_internal)
+ gen_pool_free(hdev->internal_cb_pool,
+ cb->kernel_address, cb->size);
+ else
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
+ (void *) (uintptr_t) cb->kernel_address,
+ cb->bus_address);
+
kfree(cb);
}
@@ -44,9 +50,10 @@ static void cb_release(struct kref *ref)
}
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
- int ctx_id)
+ int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
+ u32 cb_offset;
void *p;
/*
@@ -65,13 +72,25 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
if (!cb)
return NULL;
- if (ctx_id == HL_KERNEL_ASID_ID)
+ if (internal_cb) {
+ p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
+ if (!p) {
+ kfree(cb);
+ return NULL;
+ }
+
+ cb_offset = p - hdev->internal_cb_pool_virt_addr;
+ cb->is_internal = true;
+ cb->bus_address = hdev->internal_cb_va_base + cb_offset;
+ } else if (ctx_id == HL_KERNEL_ASID_ID) {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address, GFP_ATOMIC);
- else
+ } else {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address,
GFP_USER | __GFP_ZERO);
+ }
+
if (!p) {
dev_err(hdev->dev,
"failed to allocate %d of dma memory for CB\n",
@@ -87,7 +106,7 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
}
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 cb_size, u64 *handle, int ctx_id)
+ u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
@@ -112,28 +131,30 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out_err;
}
- /* Minimum allocation must be PAGE SIZE */
- if (cb_size < PAGE_SIZE)
- cb_size = PAGE_SIZE;
-
- if (ctx_id == HL_KERNEL_ASID_ID &&
- cb_size <= hdev->asic_prop.cb_pool_cb_size) {
-
- spin_lock(&hdev->cb_pool_lock);
- if (!list_empty(&hdev->cb_pool)) {
- cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
- pool_list);
- list_del(&cb->pool_list);
- spin_unlock(&hdev->cb_pool_lock);
- alloc_new_cb = false;
- } else {
- spin_unlock(&hdev->cb_pool_lock);
- dev_dbg(hdev->dev, "CB pool is empty\n");
+ if (!internal_cb) {
+ /* Minimum allocation must be PAGE SIZE */
+ if (cb_size < PAGE_SIZE)
+ cb_size = PAGE_SIZE;
+
+ if (ctx_id == HL_KERNEL_ASID_ID &&
+ cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+
+ spin_lock(&hdev->cb_pool_lock);
+ if (!list_empty(&hdev->cb_pool)) {
+ cb = list_first_entry(&hdev->cb_pool,
+ typeof(*cb), pool_list);
+ list_del(&cb->pool_list);
+ spin_unlock(&hdev->cb_pool_lock);
+ alloc_new_cb = false;
+ } else {
+ spin_unlock(&hdev->cb_pool_lock);
+ dev_dbg(hdev->dev, "CB pool is empty\n");
+ }
}
}
if (alloc_new_cb) {
- cb = hl_cb_alloc(hdev, cb_size, ctx_id);
+ cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
if (!cb) {
rc = -ENOMEM;
goto out_err;
@@ -229,8 +250,8 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->cb_mgr,
- args->in.cb_size, &handle,
- hpriv->ctx->asid);
+ args->in.cb_size, &handle,
+ hpriv->ctx->asid, false);
}
memset(args, 0, sizeof(*args));
@@ -398,14 +419,15 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
idr_destroy(&mgr->cb_handles);
}
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb)
{
u64 cb_handle;
struct hl_cb *cb;
int rc;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, internal_cb);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -437,7 +459,7 @@ int hl_cb_pool_init(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, false);
if (cb) {
cb->is_pool = true;
list_add(&cb->pool_list, &hdev->cb_pool);
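The command_buffer.c changes above add an "internal" CB path that carves buffers out of a pre-mapped pool with the genalloc API instead of calling the coherent DMA allocator per CB; the device address is then derived from each buffer's offset into the pool. A minimal sketch of that allocation pattern, with hypothetical example_* names, and with the order-5 granularity and NUMA node -1 chosen only for illustration:

#include <linux/genalloc.h>

static struct gen_pool *example_pool;

/* Register one pre-mapped DMA region as the backing store for sub-allocations. */
static int example_pool_init(void *virt_base, dma_addr_t dma_base, size_t size)
{
	example_pool = gen_pool_create(5, -1);	/* 32-byte minimum chunks */
	if (!example_pool)
		return -ENOMEM;

	return gen_pool_add_virt(example_pool, (unsigned long)virt_base,
				 (phys_addr_t)dma_base, size, -1);
}

/* Carve one buffer out of the pool; its offset yields the bus address. */
static void *example_cb_alloc(void *virt_base, dma_addr_t dma_base,
			      size_t len, dma_addr_t *bus_addr)
{
	unsigned long p = gen_pool_alloc(example_pool, len);

	if (!p)
		return NULL;
	*bus_addr = dma_base + (p - (unsigned long)virt_base);
	return (void *)p;
}

static void example_cb_free(void *p, size_t len)
{
	gen_pool_free(example_pool, (unsigned long)p, len);
}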
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index f82974a916c3..b9840e368eb5 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence)
container_of(fence, struct hl_cs_compl, base_fence);
struct hl_device *hdev = hl_cs_cmpl->hdev;
+ /* EBUSY means the CS was never submitted and hence we don't have
+ * an attached hw_sob object that we should handle here
+ */
+ if (fence->error == -EBUSY)
+ goto free;
+
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT)) {
@@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence)
kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
}
+free:
kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
@@ -239,6 +246,18 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
kfree(job);
}
+static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
+ ctx->cs_counters.device_in_reset_drop_cnt;
+ hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
+ ctx->cs_counters.out_of_mem_drop_cnt;
+ hdev->aggregated_cs_counters.parsing_drop_cnt +=
+ ctx->cs_counters.parsing_drop_cnt;
+ hdev->aggregated_cs_counters.queue_full_drop_cnt +=
+ ctx->cs_counters.queue_full_drop_cnt;
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs,
@@ -328,21 +347,30 @@ static void cs_do_release(struct kref *ref)
hl_ctx_put(cs->ctx);
+ /* We need to mark an error for not submitted because in that case
+ * the dma fence release flow is different. Mainly, we don't need
+ * to handle hw_sob for signal/wait
+ */
if (cs->timedout)
dma_fence_set_error(cs->fence, -ETIMEDOUT);
else if (cs->aborted)
dma_fence_set_error(cs->fence, -EIO);
+ else if (!cs->submitted)
+ dma_fence_set_error(cs->fence, -EBUSY);
dma_fence_signal(cs->fence);
dma_fence_put(cs->fence);
+ cs_counters_aggregate(hdev, cs->ctx);
+
+ kfree(cs->jobs_in_queue_cnt);
kfree(cs);
}
static void cs_timedout(struct work_struct *work)
{
struct hl_device *hdev;
- int ctx_asid, rc;
+ int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
rc = cs_get_unless_zero(cs);
@@ -358,11 +386,10 @@ static void cs_timedout(struct work_struct *work)
cs->timedout = true;
hdev = cs->ctx->hdev;
- ctx_asid = cs->ctx->asid;
- /* TODO: add information about last signaled seq and last emitted seq */
- dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
- ctx_asid, cs->sequence);
+ dev_err(hdev->dev,
+ "Command submission %llu has not finished in time!\n",
+ cs->sequence);
cs_put(cs);
@@ -405,21 +432,29 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
spin_lock(&ctx->cs_lock);
cs_cmpl->cs_seq = ctx->cs_sequence;
- other = ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ other = ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
- spin_unlock(&ctx->cs_lock);
dev_dbg(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
rc = -EAGAIN;
goto free_fence;
}
+ cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+ sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
+ if (!cs->jobs_in_queue_cnt) {
+ rc = -ENOMEM;
+ goto free_fence;
+ }
+
dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
ctx->asid, ctx->cs_sequence);
cs->sequence = cs_cmpl->cs_seq;
- ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)] =
&cs_cmpl->base_fence;
ctx->cs_sequence++;
@@ -434,6 +469,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return 0;
free_fence:
+ spin_unlock(&ctx->cs_lock);
kfree(cs_cmpl);
free_cs:
kfree(cs);
@@ -450,10 +486,12 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
void hl_cs_rollback_all(struct hl_device *hdev)
{
+ int i;
struct hl_cs *cs, *tmp;
/* flush all completions */
- flush_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ flush_workqueue(hdev->cq_wq[i]);
/* Make sure we don't have leftovers in the H/W queues mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
@@ -486,10 +524,18 @@ static int validate_queue_index(struct hl_device *hdev,
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
+ /* This must be checked here to prevent out-of-bounds access to
+ * hw_queues_props array
+ */
+ if (chunk->queue_index >= asic->max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ return -EINVAL;
+ }
+
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
- if ((chunk->queue_index >= HL_MAX_QUEUES) ||
- (hw_queue_prop->type == QUEUE_TYPE_NA)) {
+ if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
@@ -617,12 +663,15 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
- if (rc)
+ if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
goto free_cs_object;
+ }
if (is_kernel_allocated_cb) {
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
rc = -EINVAL;
goto free_cs_object;
}
@@ -636,6 +685,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
+ hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
if (is_kernel_allocated_cb)
@@ -668,6 +718,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
cs->ctx->asid, cs->sequence, job->id, rc);
@@ -676,6 +727,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -725,6 +777,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
+ enum hl_queue_type q_type;
u64 *signal_seq_arr = NULL, signal_seq;
u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
int rc;
@@ -757,9 +810,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+ q_type = hw_queue_prop->type;
- if ((q_idx >= HL_MAX_QUEUES) ||
- (hw_queue_prop->type != QUEUE_TYPE_EXT)) {
+ if ((q_idx >= hdev->asic_prop.max_queues) ||
+ (!hw_queue_prop->supports_sync_stream)) {
dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
goto free_cs_chunk_array;
@@ -856,25 +910,28 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
*cs_seq = cs->sequence;
- job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ job = hl_cs_allocate_job(hdev, q_type, true);
if (!job) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto put_cs;
}
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (cs->type == CS_TYPE_WAIT)
+ cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+ else
+ cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+ cb = hl_cb_kernel_create(hdev, cb_size,
+ q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
if (!cb) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
kfree(job);
rc = -EFAULT;
goto put_cs;
}
- if (cs->type == CS_TYPE_WAIT)
- cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
- else
- cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
-
job->id = 0;
job->cs = cs;
job->user_cb = cb;
@@ -1113,7 +1170,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
rc = PTR_ERR(fence);
if (rc == -EINVAL)
dev_notice_ratelimited(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu\n",
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
} else if (fence) {
rc = dma_fence_wait_timeout(fence, true, timeout);
@@ -1146,15 +1203,21 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc < 0) {
- dev_err_ratelimited(hdev->dev,
- "Error %ld on waiting for CS handle %llu\n",
- rc, seq);
if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
} else if (rc == -ETIMEDOUT) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has timed-out while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
} else if (rc == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has been aborted while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_ABORTED;
}
return rc;
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/common/context.c
index ec92b3506b1f..3e375958e73b 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -22,9 +22,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
* to this function unless the ref count is 0
*/
- for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
+ for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
dma_fence_put(ctx->cs_pending[i]);
+ kfree(ctx->cs_pending);
+
if (ctx->asid != HL_KERNEL_ASID_ID) {
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
@@ -110,8 +112,7 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
return;
dev_warn(hdev->dev,
- "Context %d closed or terminated but its CS are executing\n",
- ctx->asid);
+ "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -126,34 +127,49 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
+ ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL);
+ if (!ctx->cs_pending)
+ return -ENOMEM;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mmu ctx module\n");
- goto mem_ctx_err;
+ goto err_free_cs_pending;
}
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_cs_pending;
}
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
- goto mem_ctx_err;
+ goto err_asid_free;
+ }
+
+ rc = hdev->asic_funcs->ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "ctx_init failed\n");
+ goto err_vm_ctx_fini;
}
}
return 0;
-mem_ctx_err:
- if (ctx->asid != HL_KERNEL_ASID_ID)
- hl_asid_free(hdev, ctx->asid);
+err_vm_ctx_fini:
+ hl_vm_ctx_fini(ctx);
+err_asid_free:
+ hl_asid_free(hdev, ctx->asid);
+err_free_cs_pending:
+ kfree(ctx->cs_pending);
return rc;
}
@@ -170,6 +186,7 @@ int hl_ctx_put(struct hl_ctx *ctx)
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
+ struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
@@ -179,13 +196,13 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
return ERR_PTR(-EINVAL);
}
- if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
spin_unlock(&ctx->cs_lock);
return NULL;
}
fence = dma_fence_get(
- ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
+ ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)]);
spin_unlock(&ctx->cs_lock);
return fence;
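The context.c hunks above size the pending-CS ring from asic_prop.max_pending_cs instead of a compile-time constant and look fences up with seq & (max_pending_cs - 1); that masking only indexes the ring correctly when max_pending_cs is a power of two. A small worked sketch of the invariant, assuming a ring size of 64:

#include <linux/types.h>

#define EXAMPLE_MAX_PENDING_CS	64	/* assumed value; must be a power of two */

/* Map a monotonically increasing sequence number onto a ring slot. */
static inline unsigned int example_pending_slot(u64 seq)
{
	return seq & (EXAMPLE_MAX_PENDING_CS - 1);	/* e.g. seq 130 -> slot 2 */
}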
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 3c8dcdfba20c..c50c6fc9e905 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
#include <linux/debugfs.h>
@@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, (long *) val);
+ 0, (long *) val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
@@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
@@ -480,7 +480,7 @@ out:
return 0;
}
-static ssize_t mmu_write(struct file *file, const char __user *buf,
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
@@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
if (*ppos)
return 0;
- sprintf(tmp_buf, "%d\n", hdev->clock_gating);
+ sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
@@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u32 value;
+ u64 value;
ssize_t rc;
if (atomic_read(&hdev->in_reset)) {
@@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
return 0;
}
- rc = kstrtouint_from_user(buf, count, 10, &value);
+ rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
- if (value) {
- hdev->clock_gating = 1;
- if (hdev->asic_funcs->enable_clock_gating)
- hdev->asic_funcs->enable_clock_gating(hdev);
- } else {
- if (hdev->asic_funcs->disable_clock_gating)
- hdev->asic_funcs->disable_clock_gating(hdev);
- hdev->clock_gating = 0;
- }
+ hdev->clock_gating_mask = value;
+ hdev->asic_funcs->set_clock_gating(hdev);
return count;
}
@@ -1125,7 +1118,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
- {"mmu", mmu_show, mmu_write},
+ {"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
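
The clk_gate debugfs node changes here from a boolean to a 64-bit hexadecimal bitmask: each bit selects one engine and set_clock_gating() applies the whole mask at once. The following userspace-style sketch only illustrates how such a mask decodes; the engine count and bit layout below are placeholders, the authoritative assignment is in debugfs-driver-habanalabs and the per-ASIC code.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative engine count; the real bit layout is ASIC-specific. */
#define EXAMPLE_NUM_ENGINES 8

int main(int argc, char **argv)
{
	/* Same base-16 parse the driver does with kstrtoull_from_user() */
	uint64_t mask = strtoull(argc > 1 ? argv[1] : "0xff", NULL, 16);
	int i;

	for (i = 0; i < EXAMPLE_NUM_ENGINES; i++)
		printf("engine %d: clock gating %s\n", i,
		       (mask & (1ULL << i)) ? "enabled" : "disabled");
	return 0;
}
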
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/common/device.c
index 2b38a119704c..be16b75bdfdb 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -249,7 +249,8 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
*/
static int device_early_init(struct hl_device *hdev)
{
- int rc;
+ int i, rc;
+ char workq_name[32];
switch (hdev->asic_type) {
case ASIC_GOYA:
@@ -274,11 +275,24 @@ static int device_early_init(struct hl_device *hdev)
if (rc)
goto early_fini;
- hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
- if (hdev->cq_wq == NULL) {
- dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
- rc = -ENOMEM;
- goto asid_fini;
+ if (hdev->asic_prop.completion_queues_count) {
+ hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
+ sizeof(*hdev->cq_wq),
+ GFP_ATOMIC);
+ if (!hdev->cq_wq) {
+ rc = -ENOMEM;
+ goto asid_fini;
+ }
+ }
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+ snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+ hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
+ if (hdev->cq_wq[i] == NULL) {
+ dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+ rc = -ENOMEM;
+ goto free_cq_wq;
+ }
}
hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
@@ -321,7 +335,10 @@ free_chip_info:
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
- destroy_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ if (hdev->cq_wq[i])
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
asid_fini:
hl_asid_fini(hdev);
early_fini:
@@ -339,6 +356,8 @@ early_fini:
*/
static void device_early_fini(struct hl_device *hdev)
{
+ int i;
+
mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
@@ -351,7 +370,10 @@ static void device_early_fini(struct hl_device *hdev)
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->eq_wq);
- destroy_workqueue(hdev->cq_wq);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
hl_asid_fini(hdev);
@@ -608,7 +630,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
if (!hdev->hard_reset_pending)
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
goto out;
}
@@ -838,6 +860,22 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
if (rc)
return 0;
+ if (hard_reset) {
+ /* Disable PCI access from device F/W so it won't send
+ * us additional interrupts. We disable MSI/MSI-X in
+ * the halt_engines function and we can't have the F/W
+ * sending us interrupts after that. We need to disable
+ * the access here because if the device is marked as
+ * disabled, the message won't be sent. Also, in case
+ * of heartbeat, the device CPU is marked as disabled,
+ * so this message won't be sent in that case either
+ */
+ if (hl_fw_send_pci_access_msg(hdev,
+ ARMCP_PACKET_DISABLE_PCI_ACCESS))
+ dev_warn(hdev->dev,
+ "Failed to disable PCI access by F/W\n");
+ }
+
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -995,6 +1033,12 @@ again:
}
}
+ /* Device is now enabled because part of the initialization requires
+ * communication with the device firmware to get information that
+ * is required for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev,
@@ -1002,8 +1046,6 @@ again:
goto out_err;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
@@ -1144,14 +1186,17 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
- hdev->completion_queue = kcalloc(cq_cnt,
- sizeof(*hdev->completion_queue),
- GFP_KERNEL);
+ if (cq_cnt) {
+ hdev->completion_queue = kcalloc(cq_cnt,
+ sizeof(*hdev->completion_queue),
+ GFP_KERNEL);
- if (!hdev->completion_queue) {
- dev_err(hdev->dev, "failed to allocate completion queues\n");
- rc = -ENOMEM;
- goto hw_queues_destroy;
+ if (!hdev->completion_queue) {
+ dev_err(hdev->dev,
+ "failed to allocate completion queues\n");
+ rc = -ENOMEM;
+ goto hw_queues_destroy;
+ }
}
for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
@@ -1162,6 +1207,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
"failed to initialize completion queue\n");
goto cq_fini;
}
+ hdev->completion_queue[i].cq_idx = i;
}
/*
@@ -1219,6 +1265,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
*/
add_cdev_sysfs_on_err = true;
+ /* Device is now enabled because part of the initialization requires
+ * communication with the device firmware to get information that
+ * is required for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1226,8 +1278,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto out_disabled;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
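
device_early_init() now allocates one single-threaded workqueue per completion queue ("hl-free-jobs-%u") instead of a single shared unbound workqueue, so completion work for different CQs can run in parallel while work for the same CQ stays ordered. Below is a hedged sketch of that allocation pattern; the names are illustrative and this is not the driver's exact code, which stores the array in hdev->cq_wq and tears it down in device_early_fini().

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative: allocate one single-threaded workqueue per completion
 * queue so work items targeting the same CQ are serialized while
 * different CQs can make progress in parallel.
 */
static struct workqueue_struct **example_alloc_cq_workqueues(unsigned int count)
{
	struct workqueue_struct **wq;
	char name[32];
	unsigned int i;

	wq = kcalloc(count, sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	for (i = 0; i < count; i++) {
		snprintf(name, sizeof(name), "example-free-jobs-%u", i);
		wq[i] = create_singlethread_workqueue(name);
		if (!wq[i])
			goto err_destroy;
	}
	return wq;

err_destroy:
	while (i--)
		destroy_workqueue(wq[i]);
	kfree(wq);
	return NULL;
}
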
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index baf790cf4b78..f70302cdab1b 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hl_boot_if.h"
+#include "../include/common/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/genalloc.h>
@@ -15,7 +15,10 @@
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
+ *
* @hdev: pointer to hl_device structure.
+ * @fw_name: the firmware image name
+ * @dst: IO memory mapped address space to copy firmware to
*
* Copy fw code from firmware file to device memory.
*
@@ -61,7 +64,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
- sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+ sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@@ -144,7 +147,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, &result);
+ 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@@ -183,7 +186,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
- total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+ total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -204,7 +207,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
- sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+ sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != ARMCP_PACKET_FENCE_VAL)
@@ -248,7 +251,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
- sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+ sizeof(hb_pkt), 0, &result);
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
rc = -EIO;
@@ -286,7 +289,7 @@ int hl_fw_armcp_info_get(struct hl_device *hdev)
HL_ARMCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP info pkt, error %d\n", rc);
+ "Failed to handle ArmCP info pkt, error %d\n", rc);
goto out;
}
@@ -337,7 +340,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP EEPROM packet, error %d\n", rc);
+ "Failed to handle ArmCP EEPROM packet, error %d\n", rc);
goto out;
}
@@ -390,6 +393,53 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
"Device boot error - NIC F/W initialization failed\n");
}
+static void hl_detect_cpu_boot_status(struct hl_device *hdev, u32 status)
+{
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "Device boot error - BTL did NOT run\n");
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck inside WFE loop\n");
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in BTL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in Preboot\n");
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in SPL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in u-boot\n");
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "Device boot error - u-boot stopped by user\n");
+ break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Device boot error - Invalid status code %d\n",
+ status);
+ break;
+ }
+}
+
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 boot_err0_reg, bool skip_bmc,
@@ -463,50 +513,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
* versions but we keep them here for backward compatibility
*/
if (rc) {
- switch (status) {
- case CPU_BOOT_STATUS_NA:
- dev_err(hdev->dev,
- "Device boot error - BTL did NOT run\n");
- break;
- case CPU_BOOT_STATUS_IN_WFE:
- dev_err(hdev->dev,
- "Device boot error - Stuck inside WFE loop\n");
- break;
- case CPU_BOOT_STATUS_IN_BTL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in BTL\n");
- break;
- case CPU_BOOT_STATUS_IN_PREBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in Preboot\n");
- break;
- case CPU_BOOT_STATUS_IN_SPL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in SPL\n");
- break;
- case CPU_BOOT_STATUS_IN_UBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in u-boot\n");
- break;
- case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- break;
- case CPU_BOOT_STATUS_UBOOT_NOT_READY:
- dev_err(hdev->dev,
- "Device boot error - u-boot stopped by user\n");
- break;
- case CPU_BOOT_STATUS_TS_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- break;
- default:
- dev_err(hdev->dev,
- "Device boot error - Invalid status code %d\n",
- status);
- break;
- }
-
+ hl_detect_cpu_boot_status(hdev, status);
rc = -EIO;
goto out;
}
@@ -566,7 +573,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
- "Device failed to load, %d\n", status);
+ "Failed to load firmware to device, %d\n",
+ status);
rc = -EIO;
goto out;
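
All common-code callers in this file now pass 0 as the send_cpu_message() timeout; per the kernel-doc added to habanalabs.h further down, 0 means the ASIC-specific default timeout is used. A sketch of how an ASIC-side implementation might honour that convention is shown below; the default value, the queue id and the exact trailing-argument layout of hl_fw_send_cpu_message() are assumptions made for the sketch.

#include "habanalabs.h"	/* driver-internal header */

#define EXAMPLE_DEFAULT_TIMEOUT_USEC	4000000	/* placeholder: 4s */
#define EXAMPLE_CPU_QUEUE_ID		0	/* placeholder queue id */

/* Illustrative wrapper: substitute a per-ASIC default when the common
 * code passes a timeout of 0, then forward to the common F/W helper.
 */
static int example_send_cpu_message(struct hl_device *hdev, u32 *msg,
				    u16 len, u32 timeout, long *result)
{
	if (!timeout)
		timeout = EXAMPLE_DEFAULT_TIMEOUT_USEC;

	return hl_fw_send_cpu_message(hdev, EXAMPLE_CPU_QUEUE_ID, msg, len,
				      timeout, result);
}
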
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 1ecdcf8b763a..018d9d67e8e6 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -8,8 +8,9 @@
#ifndef HABANALABSP_H_
#define HABANALABSP_H_
-#include "include/armcp_if.h"
-#include "include/qman_if.h"
+#include "../include/common/armcp_if.h"
+#include "../include/common/qman_if.h"
+#include <uapi/misc/habanalabs.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
@@ -40,11 +41,6 @@
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
-#define HL_MAX_QUEUES 128
-
-/* MUST BE POWER OF 2 and larger than 1 */
-#define HL_MAX_PENDING_CS 64
-
#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
/* Memory */
@@ -53,6 +49,10 @@
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+/*
+ * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
+ * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
+ */
#define HL_RSVD_SOBS 4
#define HL_RSVD_MONS 2
@@ -61,6 +61,11 @@
#define HL_MAX_SOB_VAL (1 << 15)
+#define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0))
+#define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1))
+
+#define HL_PCI_NUM_BARS 6
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -86,6 +91,16 @@ struct hl_device;
struct hl_fpriv;
/**
+ * enum hl_pci_match_mode - pci match mode per region
+ * @PCI_ADDRESS_MATCH_MODE: address match mode
+ * @PCI_BAR_MATCH_MODE: bar match mode
+ */
+enum hl_pci_match_mode {
+ PCI_ADDRESS_MATCH_MODE,
+ PCI_BAR_MATCH_MODE
+};
+
+/**
* enum hl_fw_component - F/W components to read version through registers.
* @FW_COMP_UBOOT: u-boot.
* @FW_COMP_PREBOOT: preboot.
@@ -121,6 +136,32 @@ enum hl_cs_type {
};
/*
+ * struct hl_inbound_pci_region - inbound region descriptor
+ * @mode: pci match mode for this region
+ * @addr: region target address
+ * @size: region size in bytes
+ * @offset_in_bar: offset within bar (address match mode)
+ * @bar: bar id
+ */
+struct hl_inbound_pci_region {
+ enum hl_pci_match_mode mode;
+ u64 addr;
+ u64 size;
+ u64 offset_in_bar;
+ u8 bar;
+};
+
+/*
+ * struct hl_outbound_pci_region - outbound region descriptor
+ * @addr: region target address
+ * @size: region size in bytes
+ */
+struct hl_outbound_pci_region {
+ u64 addr;
+ u64 size;
+};
+
+/*
* struct hl_hw_sob - H/W SOB info.
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
@@ -141,11 +182,13 @@ struct hl_hw_sob {
* false otherwise.
* @requires_kernel_cb: true if a CB handle must be provided for jobs on this
* queue, false otherwise (a CB address must be provided).
+ * @supports_sync_stream: True if queue supports sync stream
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
u8 requires_kernel_cb;
+ u8 supports_sync_stream;
};
/**
@@ -241,14 +284,19 @@ struct hl_mmu_properties {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
+ * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
+ * @max_pending_cs: maximum number of concurrent pending command submissions
+ * @max_queues: maximum number of queues in the system
+ * @sync_stream_first_sob: first sync object available for sync stream use
+ * @sync_stream_first_mon: first monitor available for sync stream use
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
- struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
+ struct hw_queue_properties *hw_queues_props;
struct armcp_info armcp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
@@ -282,9 +330,14 @@ struct asic_fixed_properties {
u32 psoc_pci_pll_nf;
u32 psoc_pci_pll_od;
u32 psoc_pci_pll_div_factor;
+ u32 psoc_timestamp_frequency;
u32 high_pll;
u32 cb_pool_cb_cnt;
u32 cb_pool_cb_size;
+ u32 max_pending_cs;
+ u32 max_queues;
+ u16 sync_stream_first_sob;
+ u16 sync_stream_first_mon;
u8 tpc_enabled_mask;
u8 completion_queues_count;
};
@@ -339,6 +392,7 @@ struct hl_cb_mgr {
* @ctx_id: holds the ID of the owner's context.
* @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
+ * @is_internal: true if the CB is internally allocated
*/
struct hl_cb {
struct kref refcount;
@@ -355,6 +409,7 @@ struct hl_cb {
u32 ctx_id;
u8 mmap;
u8 is_pool;
+ u8 is_internal;
};
@@ -364,38 +419,19 @@ struct hl_cb {
struct hl_cs_job;
-/*
- * Currently, there are two limitations on the maximum length of a queue:
- *
- * 1. The memory footprint of the queue. The current allocated space for the
- * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
- * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
- * which currently is 4096/16 = 256 entries.
- *
- * To increase that, we need either to decrease the size of the
- * BD (difficult), or allocate more than a single page (easier).
- *
- * 2. Because the size of the JOB handle field in the BD CTL / completion queue
- * is 10-bit, we can have up to 1024 open jobs per hardware queue.
- * Therefore, each queue can hold up to 1024 entries.
- *
- * HL_QUEUE_LENGTH is in units of struct hl_bd.
- * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
- */
-
-#define HL_PAGE_SIZE 4096 /* minimum page size */
-/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
-#define HL_QUEUE_LENGTH 256
+/* Queue length of external and HW queues */
+#define HL_QUEUE_LENGTH 4096
#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
-/*
- * HL_CQ_LENGTH is in units of struct hl_cq_entry.
- * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
- */
+#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
+#error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
+#endif
+
+/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
-/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
+/* Must be power of 2 */
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
@@ -422,6 +458,7 @@ struct hl_cs_job;
* exist).
* @curr_sob_offset: the id offset to the currently used SOB from the
* HL_RSVD_SOBS that are being used by this queue.
+ * @supports_sync_stream: True if queue supports sync stream
*/
struct hl_hw_queue {
struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
@@ -430,7 +467,7 @@ struct hl_hw_queue {
u64 kernel_address;
dma_addr_t bus_address;
u32 pi;
- u32 ci;
+ atomic_t ci;
u32 hw_queue_id;
u32 cq_id;
u32 msi_vec;
@@ -440,6 +477,7 @@ struct hl_hw_queue {
u16 base_mon_id;
u8 valid;
u8 curr_sob_offset;
+ u8 supports_sync_stream;
};
/**
@@ -447,6 +485,7 @@ struct hl_hw_queue {
* @hdev: pointer to the device structure
* @kernel_address: holds the queue's kernel virtual address
* @bus_address: holds the queue's DMA address
+ * @cq_idx: completion queue index in array
* @hw_queue_id: the id of the matching H/W queue
* @ci: ci inside the queue
* @pi: pi inside the queue
@@ -456,6 +495,7 @@ struct hl_cq {
struct hl_device *hdev;
u64 kernel_address;
dma_addr_t bus_address;
+ u32 cq_idx;
u32 hw_queue_id;
u32 ci;
u32 pi;
@@ -519,6 +559,15 @@ enum hl_pll_frequency {
PLL_LAST
};
+#define PLL_REF_CLK 50
+
+enum div_select_defs {
+ DIV_SEL_REF_CLK = 0,
+ DIV_SEL_PLL_CLK = 1,
+ DIV_SEL_DIVIDED_REF = 2,
+ DIV_SEL_DIVIDED_PLL = 3,
+};
+
/**
* struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
@@ -578,8 +627,9 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
- * @enable_clock_gating: enable clock gating for reducing power consumption.
- * @disable_clock_gating: disable clock for accessing registers on HBW.
+ * @set_clock_gating: enable/disable clock gating per engine according to
+ * clock gating mask in hdev
+ * @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@@ -587,7 +637,11 @@ enum hl_pll_frequency {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
- * @send_cpu_message: send buffer to ArmCP.
+ * @send_cpu_message: send message to F/W. If the message times out, the
+ * driver will eventually reset the device. The timeout can
+ * be set by the calling function, or it can be 0, in which
+ * case the default timeout of the specific ASIC is used.
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
* @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
@@ -596,14 +650,13 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @ctx_init: context dependent initialization.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
* @read_device_fw_version: read the device's firmware versions that are
* contained in registers
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
- * @ext_queue_init: Initialize the given external queue.
- * @ext_queue_reset: Reset the given external queue.
* @get_signal_cb_size: Get signal CB size.
* @get_wait_cb_size: Get wait CB size.
* @gen_signal_cb: Generate a signal CB.
@@ -680,7 +733,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
- void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
@@ -700,14 +753,13 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*ctx_init)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
void (*read_device_fw_version)(struct hl_device *hdev,
enum hl_fw_component fwc);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
- void (*ext_queue_init)(struct hl_device *hdev, u32 hw_queue_id);
- void (*ext_queue_reset)(struct hl_device *hdev, u32 hw_queue_id);
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
@@ -743,7 +795,6 @@ struct hl_va_range {
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
@@ -777,18 +828,18 @@ struct hl_va_range {
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
- DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
- struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
+ struct dma_fence **cs_pending;
struct hl_va_range *host_va_range;
struct hl_va_range *host_huge_va_range;
struct hl_va_range *dram_va_range;
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
+ struct hl_cs_counters cs_counters;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -863,7 +914,7 @@ struct hl_userptr {
* @aborted: true if CS was aborted due to some device error.
*/
struct hl_cs {
- u16 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ u16 *jobs_in_queue_cnt;
struct hl_ctx *ctx;
struct list_head job_list;
spinlock_t job_lock;
@@ -1347,7 +1398,9 @@ struct hl_device_idle_busy_ts {
/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
- * @pcie_bar: array of available PCIe bars.
+ * @pcie_bar_phys: array of available PCIe bars physical addresses.
+ * (required only for PCI address match mode)
+ * @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
@@ -1358,7 +1411,8 @@ struct hl_device_idle_busy_ts {
* @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
- * @cq_wq: work queue of completion queues for executing work in process context
+ * @cq_wq: work queues of completion queues for executing work in process
+ * context.
* @eq_wq: work queue of event queue for executing work in process context.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
@@ -1387,17 +1441,25 @@ struct hl_device_idle_busy_ts {
* @hl_debugfs: device's debugfs manager.
* @cb_pool: list of preallocated CBs.
* @cb_pool_lock: protects the CB pool.
+ * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
+ * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
+ * @internal_cb_pool: internal command buffer memory pool.
+ * @internal_cb_va_base: internal cb pool mmu virtual address base
* @fpriv_list: list of file private data structures. Each structure is created
* when a user opens the device
* @fpriv_list_lock: protects the fpriv_list
* @compute_ctx: current compute context executing.
* @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
* and vice-versa
+ * @aggregated_cs_counters: aggregated cs counters among all contexts
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
* this value and update the F/W after the re-initialization
+ * @clock_gating_mask: bitmask controlling clock gating per engine; each bit
+ * represents a different engine. See
+ * debugfs-driver-habanalabs for details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @cs_active_cnt: number of active command submissions on this device (active
@@ -1425,7 +1487,6 @@ struct hl_device_idle_busy_ts {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @mmu_huge_page_opt: is MMU huge pages optimization enabled.
- * @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
@@ -1435,12 +1496,14 @@ struct hl_device_idle_busy_ts {
* @cdev_sysfs_created: were char devices and sysfs nodes created.
* @stop_on_err: true if engines should stop on error.
* @supports_sync_stream: is sync stream supported.
+ * @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
*/
struct hl_device {
struct pci_dev *pdev;
- void __iomem *pcie_bar[6];
+ u64 pcie_bar_phys[HL_PCI_NUM_BARS];
+ void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
struct cdev cdev;
struct cdev cdev_ctrl;
@@ -1451,7 +1514,7 @@ struct hl_device {
char asic_name[16];
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
- struct workqueue_struct *cq_wq;
+ struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
@@ -1483,6 +1546,11 @@ struct hl_device {
struct list_head cb_pool;
spinlock_t cb_pool_lock;
+ void *internal_cb_pool_virt_addr;
+ dma_addr_t internal_cb_pool_dma_addr;
+ struct gen_pool *internal_cb_pool;
+ u64 internal_cb_va_base;
+
struct list_head fpriv_list;
struct mutex fpriv_list_lock;
@@ -1490,9 +1558,12 @@ struct hl_device {
struct hl_device_idle_busy_ts *idle_busy_ts_arr;
+ struct hl_cs_counters aggregated_cs_counters;
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
+ u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
int cs_active_cnt;
@@ -1514,7 +1585,6 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
- u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
@@ -1522,6 +1592,7 @@ struct hl_device {
u8 cdev_sysfs_created;
u8 stop_on_err;
u8 supports_sync_stream;
+ u8 sync_stream_queue_idx;
u8 supports_coresight;
u8 supports_soft_reset;
@@ -1690,7 +1761,7 @@ int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
- u64 *handle, int ctx_id);
+ u64 *handle, int ctx_id, bool internal_cb);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
@@ -1698,7 +1769,8 @@ struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
void hl_cb_put(struct hl_cb *cb);
void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
@@ -1762,9 +1834,10 @@ int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
u64 addr);
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size);
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region);
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
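
hl_pci_init_iatu() is replaced by the finer-grained hl_pci_set_inbound_region()/hl_pci_set_outbound_region() pair, driven by the new descriptor structs declared above. The sketch below shows how an ASIC init path could describe one BAR-match region and one address-match region; every number in it (region index, BAR id, address, size) is illustrative and not taken from any real ASIC.

#include "habanalabs.h"	/* driver-internal header */

static int example_init_iatu(struct hl_device *hdev)
{
	struct hl_inbound_pci_region inbound_region = {0};
	struct hl_outbound_pci_region outbound_region = {0};
	int rc;

	/* Inbound region 0: the whole BAR maps to SRAM (BAR match mode) */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = 0;
	inbound_region.addr = 0x7FF0000000ull;	/* example SRAM base */
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
	if (rc)
		return rc;

	/* Inbound region 1: a window inside BAR 4 maps to DRAM
	 * (address match mode)
	 */
	inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
	inbound_region.bar = 4;
	inbound_region.offset_in_bar = 0;
	inbound_region.size = 0x100000000ull;	/* example 4 GB window */
	inbound_region.addr = 0;		/* example DRAM base */
	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
	if (rc)
		return rc;

	/* Outbound region 0: device-initiated access to host memory */
	outbound_region.addr = 0x8000000000ull;	/* example host window base */
	outbound_region.size = 0x1000000000ull;	/* example 64 GB */
	return hl_pci_set_outbound_region(hdev, &outbound_region);
}
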
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 8652c7e5d7f1..c6b31e93fb5e 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -232,13 +232,12 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
- hdev->clock_gating = 1;
+ hdev->clock_gating_mask = ULONG_MAX;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;
hdev->sram_scrambler_enable = 1;
hdev->dram_scrambler_enable = 1;
- hdev->rl_enable = 1;
hdev->bmc_enable = 1;
hdev->hard_reset_on_fw_events = 1;
}
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 52eedd3a6c3a..5af1c03da473 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -276,6 +276,27 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
+static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_cs_counters cs_counters = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ if (hpriv->ctx)
+ memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ return copy_to_user(out, &cs_counters,
+ min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -336,6 +357,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
+ case HL_INFO_CS_COUNTERS:
+ return cs_counters_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
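
The new HL_INFO_CS_COUNTERS opcode returns both the per-context and the device-wide aggregated counters in a struct hl_info_cs_counters. A userspace sketch of querying it through the INFO ioctl follows; it assumes the installed uapi header exports HL_IOCTL_INFO together with the hl_info_args fields used by the handler above (op, return_pointer, return_size).

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* installed uapi header */

/* fd is an open file descriptor of a habanalabs device node */
static int read_cs_counters(int fd, struct hl_info_cs_counters *counters)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_CS_COUNTERS;
	args.return_pointer = (__u64) (uintptr_t) counters;
	args.return_size = sizeof(*counters);

	return ioctl(fd, HL_IOCTL_INFO, &args);
}
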
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index f4434b39ef1b..287681646071 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -23,10 +23,14 @@ inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
return ptr;
}
+static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
+{
+ return atomic_read(ci) & ((queue_len << 1) - 1);
+}
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
- int delta = (q->pi - q->ci);
+ int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
if (delta >= 0)
return (queue_len - delta);
@@ -40,21 +44,14 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
struct hl_hw_queue *q;
int i;
- hdev->asic_funcs->hw_queues_lock(hdev);
-
if (hdev->disabled)
- goto out;
+ return;
q = &hdev->kernel_queues[0];
- for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_INT) {
- q->ci += cs->jobs_in_queue_cnt[i];
- q->ci &= ((q->int_queue_len << 1) - 1);
- }
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_INT)
+ atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
-
-out:
- hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
@@ -161,6 +158,13 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
{
int free_slots_cnt;
+ if (num_of_entries > q->int_queue_len) {
+ dev_err(hdev->dev,
+ "Cannot populate queue %u with %u jobs\n",
+ q->hw_queue_id, num_of_entries);
+ return -ENOMEM;
+ }
+
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
@@ -174,38 +178,26 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
- * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
* @hdev: Pointer to hl_device structure.
* @q: Pointer to hl_hw_queue structure.
* @num_of_entries: How many entries to check for space.
*
- * Perform the following:
- * - Make sure we have enough space in the completion queue.
- * This check also ensures that there is enough space in the h/w queue, as
- * both queues are of the same size.
- * - Reserve space in the completion queue (needs to be reversed if there
- * is a failure down the road before the actual submission of work).
+ * Notice: We do not reserve queue entries so this function mustn't be called
+ * more than once per CS for the same queue
*
- * Both operations are done using the "free_slots_cnt" field of the completion
- * queue. The CI counters of the queue and the completion queue are not
- * needed/used for the H/W queue type.
*/
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
- atomic_t *free_slots =
- &hdev->completion_queue[q->cq_id].free_slots_cnt;
+ int free_slots_cnt;
- /*
- * Check we have enough space in the completion queue.
- * Add -1 to counter (decrement) unless counter was already 0.
- * In that case, CQ is full so we can't submit a new CB.
- * atomic_add_unless will return 0 if counter was already 0.
- */
- if (atomic_add_negative(num_of_entries * -1, free_slots)) {
- dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
- num_of_entries, q->hw_queue_id);
- atomic_add(num_of_entries, free_slots);
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
@@ -366,7 +358,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
- struct hl_cq *cq;
u64 ptr;
u32 offset, ctl, len;
@@ -376,7 +367,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
* write address offset in the SM block (QMAN LBW message).
* The write address offset is calculated as "COMP_OFFSET << 2".
*/
- offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
@@ -395,17 +386,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- /*
- * No need to protect pi_offset because scheduling to the
- * H/W queues is done under the scheduler mutex
- *
- * No need to check if CQ is full because it was already
- * checked in hw_queue_sanity_checks
- */
- cq = &hdev->completion_queue[q->cq_id];
-
- cq->pi = hl_cq_inc_ptr(cq->pi);
-
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
@@ -509,19 +489,23 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
+ u32 max_queues;
int rc = 0, i, cq_cnt;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hl_device_disabled_or_in_reset(hdev)) {
+ ctx->cs_counters.device_in_reset_drop_cnt++;
dev_err(hdev->dev,
"device is disabled or in reset, CS rejected!\n");
rc = -EPERM;
goto out;
}
+ max_queues = hdev->asic_prop.max_queues;
+
q = &hdev->kernel_queues[0];
- for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
if (cs->jobs_in_queue_cnt[i]) {
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
@@ -543,11 +527,12 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
break;
}
- if (rc)
+ if (rc) {
+ ctx->cs_counters.queue_full_drop_cnt++;
goto unroll_cq_resv;
+ }
- if (q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW)
+ if (q->queue_type == QUEUE_TYPE_EXT)
cq_cnt++;
}
}
@@ -598,10 +583,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
unroll_cq_resv:
q = &hdev->kernel_queues[0];
- for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW) &&
- cs->jobs_in_queue_cnt[i]) {
+ for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
+ if ((q->queue_type == QUEUE_TYPE_EXT) &&
+ (cs->jobs_in_queue_cnt[i])) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -625,7 +609,7 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- q->ci = hl_queue_inc_ptr(q->ci);
+ atomic_inc(&q->ci);
}
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
@@ -660,12 +644,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
}
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
- if (!is_cpu_queue)
- hdev->asic_funcs->ext_queue_init(hdev, q->hw_queue_id);
-
return 0;
free_queue:
@@ -697,7 +678,7 @@ static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
q->pi = 0;
- q->ci = 0;
+ atomic_set(&q->ci, 0);
return 0;
}
@@ -726,12 +707,48 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
return 0;
}
+static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_hw_sob *hw_sob;
+ int sob, queue_idx = hdev->sync_stream_queue_idx++;
+
+ hw_queue->base_sob_id =
+ prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS;
+ hw_queue->base_mon_id =
+ prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS;
+ hw_queue->next_sob_val = 1;
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
+static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcnt might be bigger
+ * than 1 and therefore we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
/*
* queue_init - main initialization function for H/W queue object
*
@@ -747,8 +764,6 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
{
int rc;
- BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
q->hw_queue_id = hw_queue_id;
switch (q->queue_type) {
@@ -774,6 +789,9 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
break;
}
+ if (q->supports_sync_stream)
+ sync_stream_queue_init(hdev, q->hw_queue_id);
+
if (rc)
return rc;
@@ -835,7 +853,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
struct hl_hw_queue *q;
int i, rc, q_ready_cnt;
- hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
+ hdev->kernel_queues = kcalloc(asic->max_queues,
sizeof(*hdev->kernel_queues), GFP_KERNEL);
if (!hdev->kernel_queues) {
@@ -845,9 +863,11 @@ int hl_hw_queues_create(struct hl_device *hdev)
/* Initialize the H/W queues */
for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
- i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
+ i < asic->max_queues ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
+ q->supports_sync_stream =
+ asic->hw_queues_props[i].supports_sync_stream;
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
@@ -870,9 +890,10 @@ release_queues:
void hl_hw_queues_destroy(struct hl_device *hdev)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -881,15 +902,17 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
if ((!q->valid) ||
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
- q->pi = q->ci = 0;
+ q->pi = 0;
+ atomic_set(&q->ci, 0);
- if (q->queue_type == QUEUE_TYPE_EXT)
- hdev->asic_funcs->ext_queue_reset(hdev, q->hw_queue_id);
+ if (q->supports_sync_stream)
+ sync_stream_queue_reset(hdev, q->hw_queue_id);
}
}
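
With the consumer index now an atomic_t that only grows, queue_ci_get() folds it into a window of twice the queue length before free slots are computed, so a full queue (delta equal to the length) can be told apart from an empty one (delta zero). A standalone sketch of that arithmetic with an illustrative queue length:

#include <assert.h>

/* Illustrative queue length; both pi and the folded ci live in a
 * window of twice the queue length.
 */
#define EXAMPLE_QUEUE_LEN 8

static int example_free_slots(unsigned int pi, unsigned int raw_ci)
{
	int ci = raw_ci & ((EXAMPLE_QUEUE_LEN << 1) - 1);
	int delta = (int) pi - ci;

	if (delta >= 0)
		return EXAMPLE_QUEUE_LEN - delta;

	/* pi already wrapped past the 2*len window while ci did not */
	return -delta - EXAMPLE_QUEUE_LEN;
}

int main(void)
{
	assert(example_free_slots(0, 0) == 8);	/* empty */
	assert(example_free_slots(8, 0) == 0);	/* full */
	assert(example_free_slots(3, 1) == 6);
	assert(example_free_slots(2, 12) == 2);	/* wrapped pi */
	return 0;
}
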
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 8c6cd77e6af6..b997336fa75f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
-#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, value);
+ 0, value);
if (rc) {
dev_err(hdev->dev,
@@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/common/irq.c
index fac65fbd70e8..c8db717023f5 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -10,11 +10,12 @@
#include <linux/slab.h>
/**
- * This structure is used to schedule work of EQ entry and armcp_reset event
+ * struct hl_eqe_work - This structure is used to schedule work of EQ
+ * entry and armcp_reset event
*
- * @eq_work - workqueue object to run when EQ entry is received
- * @hdev - pointer to device structure
- * @eq_entry - copy of the EQ entry
+ * @eq_work: workqueue object to run when EQ entry is received
+ * @hdev: pointer to device structure
+ * @eq_entry: copy of the EQ entry
*/
struct hl_eqe_work {
struct work_struct eq_work;
@@ -22,7 +23,7 @@ struct hl_eqe_work {
struct hl_eq_entry eq_entry;
};
-/*
+/**
* hl_cq_inc_ptr - increment ci or pi of cq
*
* @ptr: the current ci or pi value of the completion queue
@@ -38,7 +39,7 @@ inline u32 hl_cq_inc_ptr(u32 ptr)
return ptr;
}
-/*
+/**
* hl_eq_inc_ptr - increment ci of eq
*
* @ptr: the current ci value of the event queue
@@ -65,7 +66,7 @@ static void irq_handle_eqe(struct work_struct *work)
kfree(eqe_work);
}
-/*
+/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
@@ -118,15 +119,10 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
if ((shadow_index_valid) && (!hdev->disabled)) {
job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
- queue_work(hdev->cq_wq, &job->finish_work);
+ queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
}
- /* Update ci of the context's queue. There is no
- * need to protect it with spinlock because this update is
- * done only inside IRQ and there is a different IRQ per
- * queue
- */
- queue->ci = hl_queue_inc_ptr(queue->ci);
+ atomic_inc(&queue->ci);
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
@@ -141,7 +137,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
-/*
+/**
* hl_irq_handler_eq - irq handler for event queue
*
* @irq: irq number
@@ -205,7 +201,7 @@ skip_irq:
return IRQ_HANDLED;
}
-/*
+/**
* hl_cq_init - main initialization function for a cq object
*
* @hdev: pointer to device structure
@@ -219,8 +215,6 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
- BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
&q->bus_address, GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -237,7 +231,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
return 0;
}
-/*
+/**
* hl_cq_fini - destroy completion queue
*
* @hdev: pointer to device structure
@@ -268,7 +262,7 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
-/*
+/**
* hl_eq_init - main initialization function for an event queue object
*
* @hdev: pointer to device structure
@@ -281,8 +275,6 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
- BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
HL_EQ_SIZE_IN_BYTES,
&q->bus_address);
@@ -296,7 +288,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
return 0;
}
-/*
+/**
* hl_eq_fini - destroy event queue
*
* @hdev: pointer to device structure
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/common/memory.c
index 47da84a17719..dce9273e557a 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -7,7 +7,7 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -1730,8 +1730,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_notice(hdev->dev,
- "ctx %d is freed while it has va in use\n",
- ctx->asid);
+ "user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index a290d6b49d78..edcc11d5eaf1 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/genalloc.h>
#include <linux/slab.h>
@@ -502,7 +502,6 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
return 0;
mutex_init(&ctx->mmu_lock);
- hash_init(ctx->mmu_phys_hash);
hash_init(ctx->mmu_shadow_hash);
return dram_default_mapping_init(ctx);
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/common/pci.c
index 9f634ef6f5b3..7bd3737571f3 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -6,16 +6,22 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/pci/pci_general.h"
+#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
+#include <linux/bitfield.h>
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
+#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
+#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30)
+#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19)
+#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8)
+
/**
* hl_pci_bars_map() - Map PCI BARs.
* @hdev: Pointer to hl_device structure.
- * @bar_name: Array of BAR names.
+ * @name: Array of BAR names.
* @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
*
* Request PCI regions and map them to kernel virtual addresses.
@@ -61,7 +67,7 @@ err:
return rc;
}
-/*
+/**
* hl_pci_bars_unmap() - Unmap PCI BARS.
* @hdev: Pointer to hl_device structure.
*
@@ -80,9 +86,11 @@ static void hl_pci_bars_unmap(struct hl_device *hdev)
pci_release_regions(pdev);
}
-/*
+/**
* hl_pci_elbi_write() - Write through the ELBI interface.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -140,6 +148,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
/**
* hl_pci_iatu_write() - iatu write routine.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -161,7 +171,7 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
return 0;
}
-/*
+/**
* hl_pci_reset_link_through_bridge() - Reset PCI link.
* @hdev: Pointer to hl_device structure.
*/
@@ -183,110 +193,94 @@ static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
}
/**
- * hl_pci_set_dram_bar_base() - Set DDR BAR to map specific device address.
+ * hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
- * @inbound_region: Inbound region number.
- * @bar: PCI BAR number.
- * @addr: Address in DRAM. Must be aligned to DRAM bar size.
+ * @region: Inbound region number.
+ * @pci_region: Inbound region parameters.
*
- * Configure the iATU so that the DRAM bar will start at the specified address.
+ * Configure the iATU inbound region.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
- u64 addr)
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 offset;
- int rc;
+ u64 bar_phys_base, region_base, region_end_address;
+ u32 offset, ctrl_reg_val;
+ int rc = 0;
- switch (inbound_region) {
- case 0:
- offset = 0x100;
- break;
- case 1:
- offset = 0x300;
- break;
- case 2:
- offset = 0x500;
- break;
- default:
- dev_err(hdev->dev, "Invalid inbound region %d\n",
- inbound_region);
- return -EINVAL;
- }
+ /* region offset */
+ offset = (0x200 * region) + 0x100;
+
+ if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
+ bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
+ region_base = bar_phys_base + pci_region->offset_in_bar;
+ region_end_address = region_base + pci_region->size - 1;
- if (bar != 0 && bar != 2 && bar != 4) {
- dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
- return -EINVAL;
+ rc |= hl_pci_iatu_write(hdev, offset + 0x8,
+ lower_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0xC,
+ upper_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x10,
+ lower_32_bits(region_end_address));
}
/* Point to the specified address */
- rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
- rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
+ rc = hl_pci_iatu_write(hdev, offset + 0x14,
+ lower_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18,
+ upper_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
- /* Enable + BAR match + match enable + BAR number */
- rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
+
+ /* Enable + bar/address match + match enable + bar number */
+ ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
+ pci_region->mode);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
+
+ if (pci_region->mode == PCI_BAR_MATCH_MODE)
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
+ pci_region->bar);
+
+ rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
/* Return the DBI window to the default location */
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
if (rc)
- dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
- addr);
+ dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
+ pci_region->bar, pci_region->addr);
return rc;
}
/**
- * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
+ * hl_pci_set_outbound_region() - Configure outbound region 0
* @hdev: Pointer to hl_device structure.
- * @sram_base_address: SRAM base address.
- * @dram_base_address: DRAM base address.
- * @host_phys_base_address: Base physical address of host memory for device
- * transactions.
- * @host_phys_size: Size of host memory for device transactions.
+ * @pci_region: Outbound region parameters.
*
- * This is needed in case the firmware doesn't initialize the iATU.
+ * Configure the iATU outbound region 0.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size)
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 host_phys_end_addr;
+ u64 outbound_region_end_address;
int rc = 0;
- /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
- rc = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x100, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
-
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
-
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
- /* Point to DRAM */
- if (!hdev->asic_funcs->set_dram_bar_base)
- return -EINVAL;
- if (hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address) ==
- U64_MAX)
- return -EIO;
-
- /* Outbound Region 0 - Point to Host */
- host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
+ /* Outbound Region 0 */
+ outbound_region_end_address =
+ pci_region->addr + pci_region->size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
- lower_32_bits(host_phys_base_address));
+ lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, 0x00C,
- upper_32_bits(host_phys_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
+ upper_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, 0x010,
+ lower_32_bits(outbound_region_end_address));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
@@ -294,7 +288,8 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
else
rc |= hl_pci_iatu_write(hdev, 0x018, 0);
- rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
+ rc |= hl_pci_iatu_write(hdev, 0x020,
+ upper_32_bits(outbound_region_end_address));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
/* Enable */
@@ -304,16 +299,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
- if (rc)
- return -EIO;
-
- return 0;
+ return rc;
}
/**
* hl_pci_set_dma_mask() - Set DMA masks for the device.
* @hdev: Pointer to hl_device structure.
- * @dma_mask: number of bits for the requested dma mask.
*
* This function sets the DMA masks (regular and consistent) for a specified
* value. If it doesn't succeed, it tries to set it to a fall-back value
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 5d78d5e1c782..b3cb0ac4721c 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,9 +9,6 @@
#include <linux/pci.h>
-#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
-#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
-
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct armcp_packet pkt;
@@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_CLK_PKT_TIMEOUT, &result);
+ 0, &result);
if (rc) {
dev_err(hdev->dev,
@@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_CLK_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_PWR_PKT_TIMEOUT, &result);
+ 0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
@@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SET_PWR_PKT_TIMEOUT, NULL);
+ 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
@@ -334,6 +331,9 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
char *data;
int rc;
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
if (!max_size)
return -EINVAL;
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
index f802cdc980ca..c9f4703cff24 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 61f88e9884ce..00a0a7238d81 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -6,12 +6,12 @@
*/
#include "gaudiP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_1.h"
-#include "include/gaudi/gaudi_masks.h"
-#include "include/gaudi/gaudi_fw_if.h"
-#include "include/gaudi/gaudi_reg_map.h"
-#include "include/gaudi/gaudi_async_ids_map_extended.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_1.h"
+#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_reg_map.h"
+#include "../include/gaudi/gaudi_async_ids_map_extended.h"
#include <linux/module.h>
#include <linux/pci.h>
@@ -21,6 +21,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
+#include <linux/bitfield.h>
/*
* Gaudi security scheme:
@@ -74,12 +75,12 @@
#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
-#define GAUDI_PLDM_SRESET_TIMEOUT_MSEC 14000 /* 14s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@@ -96,7 +97,12 @@
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
-#define GAUDI_ARB_WDT_TIMEOUT 0x400000
+#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
+
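+/* Engines for which enabled clock gating blocks register access through
+ * debugfs (MME0, MME2 and all TPCs)
+ */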
+#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
+ BIT(GAUDI_ENGINE_ID_MME_0) |\
+ BIT(GAUDI_ENGINE_ID_MME_2) |\
+ GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
@@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
- [GAUDI_PCI_DMA_1] = 0,
- [GAUDI_PCI_DMA_2] = 1,
- [GAUDI_PCI_DMA_3] = 5,
- [GAUDI_HBM_DMA_1] = 2,
- [GAUDI_HBM_DMA_2] = 3,
- [GAUDI_HBM_DMA_3] = 4,
- [GAUDI_HBM_DMA_4] = 6,
- [GAUDI_HBM_DMA_5] = 7
+ [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
+ [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
+ [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
+ [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
+ [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
+ [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
+ [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
+ [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@@ -315,6 +321,13 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
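+/**
+ * struct ecc_info_extract_params - parameters for reading a block's ECC info
+ * @block_address: base address of the block whose ECC registers are read
+ * @num_memories: number of memory wrappers in the block
+ * @derr: true to read the double-error (DERR) registers, false for the
+ * single-error (SERR) registers
+ * @disable_clock_gating: true if clock gating must be disabled around the
+ * register accesses
+ */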
+struct ecc_info_extract_params {
+ u64 block_address;
+ u32 num_memories;
+ bool derr;
+ bool disable_clock_gating;
+};
+
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
@@ -333,22 +346,25 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
- if (GAUDI_QUEUE_ID_SIZE >= HL_MAX_QUEUES) {
- dev_err(hdev->dev,
- "Number of H/W queues must be smaller than %d\n",
- HL_MAX_QUEUES);
- return -EFAULT;
- }
+ prop->max_queues = GAUDI_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
- for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
+ for (i = 0 ; i < prop->max_queues ; i++) {
if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 1;
+ prop->hw_queues_props[i].supports_sync_stream = 1;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
@@ -357,14 +373,13 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
}
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
-
+ prop->sync_stream_first_sob = 0;
+ prop->sync_stream_first_mon = 0;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = GAUDI_HBM_SIZE_32GB;
prop->dram_end_address = prop->dram_base_address +
@@ -429,6 +444,8 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+ prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
+
return 0;
}
@@ -451,6 +468,7 @@ static int gaudi_pci_bars_map(struct hl_device *hdev)
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -458,7 +476,10 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 2 - Bar 4 - Point to HBM */
- rc = hl_pci_set_dram_bar_base(hdev, 2, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = HBM_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
if (rc)
return U64_MAX;
@@ -472,22 +493,43 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
static int gaudi_init_iatu(struct hl_device *hdev)
{
- int rc = 0;
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
- rc = hl_pci_iatu_write(hdev, 0x314,
- lower_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x318,
- upper_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x300, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x304, 0xC0080200);
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = CFG_BAR_ID;
+ inbound_region.addr = SPI_FLASH_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+ /* Inbound Region 2 - Bar 4 - Point to HBM */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = HBM_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
if (rc)
- return -EIO;
+ goto done;
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
static int gaudi_early_init(struct hl_device *hdev)
@@ -510,7 +552,8 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_BAR_ID),
SRAM_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -520,20 +563,26 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
static int gaudi_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -548,11 +597,36 @@ static int gaudi_early_fini(struct hl_device *hdev)
static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ u32 nr = RREG32(mmPSOC_CPU_PLL_NR);
+ u32 nf = RREG32(mmPSOC_CPU_PLL_NF);
+ u32 od = RREG32(mmPSOC_CPU_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
@@ -567,7 +641,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
u8 tpc_id;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -1638,8 +1712,8 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
hbm0_wr = 0x33333333;
- hbm1_wr = 0x33333333;
hbm0_rd = 0x77777777;
+ hbm1_wr = 0x55555555;
hbm1_rd = 0xDDDDDDDD;
WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
@@ -1689,125 +1763,6 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}
-static void gaudi_init_rate_limiter(struct hl_device *hdev)
-{
- u32 nr, nf, od, sat, rst, timeout;
- u64 freq;
-
- nr = RREG32(mmPSOC_HBM_PLL_NR);
- nf = RREG32(mmPSOC_HBM_PLL_NF);
- od = RREG32(mmPSOC_HBM_PLL_OD);
- freq = (50 * (nf + 1)) / ((nr + 1) * (od + 1));
-
- dev_dbg(hdev->dev, "HBM frequency is %lluMHz\n", freq);
-
- /* Configuration is for five (5) DDMA channels */
- if (freq == 800) {
- sat = 4;
- rst = 11;
- timeout = 15;
- } else if (freq == 900) {
- sat = 4;
- rst = 15;
- timeout = 16;
- } else if (freq == 950) {
- sat = 4;
- rst = 15;
- timeout = 15;
- } else {
- dev_warn(hdev->dev,
- "unsupported HBM frequency %lluMHz, no rate-limiters\n",
- freq);
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1, 0x111);
-
- if (!hdev->rl_enable) {
- dev_info(hdev->dev, "Rate limiters disabled\n");
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN, 1);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN, 1);
-}
-
static void gaudi_init_golden_registers(struct hl_device *hdev)
{
u32 tpc_offset;
@@ -1817,9 +1772,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_hbm_cred(hdev);
- gaudi_init_rate_limiter(hdev);
-
- gaudi_disable_clock_gating(hdev);
+ hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
@@ -1839,9 +1792,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
-
- /* WA for H3-2081 */
- WREG32(mmPCIE_WRAP_MAX_OUTSTAND, 0x10ff);
}
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
@@ -1893,6 +1843,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+ WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
+
/* The following configuration is needed only once per QMAN */
if (qman_id == 0) {
/* Configure RAZWI IRQ */
@@ -2529,46 +2481,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev)
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
-static void gaudi_enable_clock_gating(struct hl_device *hdev)
+static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
int i;
- if (!hdev->clock_gating)
- return;
-
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
- return;
-
/* In case we are during debug session, don't enable the clock gate
* as it may interfere
*/
if (hdev->in_debug)
return;
- for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i]))))
+ continue;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_UPPER_CP_CGM_PWR_GATE_EN);
}
- for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i]))))
+ continue;
+
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
- WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME0_QM_CGM_CFG,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
+ WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
+
+ if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
+ WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ if (!(hdev->clock_gating_mask &
+ (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
+ continue;
+
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
QMAN_CGM1_PWR_GATE_EN);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
@@ -2632,36 +2593,23 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
- }
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
- gaudi_disable_clock_gating(hdev);
+ hdev->asic_funcs->disable_clock_gating(hdev);
msleep(wait_timeout_ms);
@@ -2679,10 +2627,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
- if (hard_reset)
- gaudi_disable_msi(hdev);
- else
- gaudi_sync_irqs(hdev);
+ gaudi_disable_msi(hdev);
}
static int gaudi_mmu_init(struct hl_device *hdev)
@@ -2716,8 +2661,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@@ -2725,6 +2669,12 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_HOP_CONFIGURATION,
hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+ /*
+ * The H/W expects the first PI after init to be 1. After wraparound
+ * we'll write 0.
+ */
+ gaudi->mmu_cache_inv_pi = 1;
+
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
@@ -2961,16 +2911,6 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_hbm_dma_qmans(hdev);
- /*
- * Before pushing u-boot/linux to device, need to set the hbm bar to
- * base address of dram
- */
- if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
- dev_err(hdev->dev,
- "failed to map HBM bar to DRAM base address\n");
- return -EIO;
- }
-
rc = gaudi_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
@@ -2995,7 +2935,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_tpc_qmans(hdev);
- gaudi_enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
gaudi_enable_timestamp(hdev);
@@ -3029,48 +2969,56 @@ disable_queues:
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 status, reset_timeout_ms, boot_strap = 0;
+ u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0;
+
+ if (!hard_reset) {
+ dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
+ return;
+ }
if (hdev->pldm) {
- if (hard_reset)
- reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
- else
- reset_timeout_ms = GAUDI_PLDM_SRESET_TIMEOUT_MSEC;
+ reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
} else {
reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
- if (hard_reset) {
- /* Tell ASIC not to re-initialize PCIe */
- WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+ /* Set device to handle FLR by H/W as we will put the device CPU to
+ * halt mode
+ */
+ WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
+ PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
- boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
- /* H/W bug WA:
- * rdata[31:0] = strap_read_val;
- * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
- */
- boot_strap = (((boot_strap & 0x7FE00000) << 1) |
- (boot_strap & 0x001FFFFF));
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
-
- /* Restart BTL/BLR upon hard-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
-
- WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
- 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
- } else {
- /* Don't restart BTL/BLR upon soft-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 0);
+ /* The state of the CPU is unknown, so make sure it is stopped by any
+ * means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
- WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST,
- 1 << PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued SOFT reset command, going to wait %dms\n",
- reset_timeout_ms);
- }
+ msleep(cpu_timeout_ms);
+
+ /* Tell ASIC not to re-initialize PCIe */
+ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+ boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+
+ /* H/W bug WA:
+ * rdata[31:0] = strap_read_val;
+ * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
+ */
+ boot_strap = (((boot_strap & 0x7FE00000) << 1) |
+ (boot_strap & 0x001FFFFF));
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
+
+ /* Restart BTL/BLR upon hard-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -3084,18 +3032,6 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
"Timeout while waiting for device to reset 0x%x\n",
status);
- if (!hard_reset) {
- gaudi->hw_cap_initialized &= ~(HW_CAP_PCI_DMA | HW_CAP_MME |
- HW_CAP_TPC_MASK |
- HW_CAP_HBM_DMA);
-
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_SOFT_RESET);
- return;
- }
-
- /* We continue here only for hard-reset */
-
WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
@@ -3104,7 +3040,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_HBM_DMA | HW_CAP_PLL |
HW_CAP_MMU |
HW_CAP_SRAM_SCRAMBLER |
- HW_CAP_HBM_SCRAMBLER);
+ HW_CAP_HBM_SCRAMBLER |
+ HW_CAP_CLK_GATE);
+
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}
@@ -3455,6 +3393,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
return 0;
}
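+ /* A timeout of 0 means use the default timeout for messages to the CPU */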
+ if (!timeout)
+ timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
+
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@@ -3550,7 +3491,7 @@ static int gaudi_test_queues(struct hl_device *hdev)
{
int i, rc, ret_val = 0;
- for (i = 0 ; i < HL_MAX_QUEUES ; i++) {
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
rc = gaudi_test_queue(hdev, i);
if (rc)
@@ -3790,6 +3731,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
src_in_host);
}
+static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_load_and_exe *user_pkt)
+{
+ u32 cfg;
+
+ cfg = le32_to_cpu(user_pkt->cfg);
+
+ if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
+ dev_err(hdev->dev,
+ "User not allowed to use Load and Execute\n");
+ return -EPERM;
+ }
+
+ parser->patched_cb_size += sizeof(struct packet_load_and_exe);
+
+ return 0;
+}
+
static int gaudi_validate_cb(struct hl_device *hdev,
struct hl_cs_parser *parser, bool is_mmu)
{
@@ -3838,6 +3798,17 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_LOAD_AND_EXE:
+ rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
+ (struct packet_load_and_exe *) user_pkt);
+ break;
+
case PACKET_LIN_DMA:
parser->contains_dma_pkt = true;
if (is_mmu)
@@ -3848,14 +3819,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
break;
case PACKET_WREG_32:
- case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
case PACKET_FENCE:
case PACKET_NOP:
case PACKET_ARB_POINT:
- case PACKET_LOAD_AND_EXE:
parser->patched_cb_size += pkt_size;
break;
@@ -4103,9 +4072,8 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -4177,9 +4145,8 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -4308,11 +4275,11 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
- u32 cb_size, ctl;
+ u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -4337,6 +4304,15 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
goto release_cb;
}
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause && !hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+
job->id = 0;
job->user_cb = cb;
job->user_cb->cs_cnt++;
@@ -4348,11 +4324,23 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
-
hl_debugfs_remove_job(hdev, job);
kfree(job);
cb->cs_cnt--;
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause) {
+ dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
+ rc = -EIO;
+ if (!hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+ }
+
release_cb:
hl_cb_put(cb);
hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
@@ -4490,13 +4478,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
*val = RREG32(addr - CFG_BASE);
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
*val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4532,13 +4525,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
WREG32(addr - CFG_BASE, val);
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4574,7 +4572,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
@@ -4584,6 +4586,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = (((u64) val_h) << 32) | val_l;
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
*val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4620,7 +4623,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
- if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+
+ if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
+ (hdev->clock_gating_mask &
+ GAUDI_CLK_GATE_DEBUGFS_MASK)) {
+
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
@@ -4629,6 +4636,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
WREG32(addr + sizeof(u32) - CFG_BASE,
upper_32_bits(val));
}
+
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
@@ -4850,7 +4858,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
@@ -5178,62 +5186,76 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev)
* | |0xF4C memory wrappers 127:96 |
* +-------------------+------------------------------------------------------+
*/
-static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
- const char *block_name,
- u64 block_address, int num_memories,
- bool derr, bool disable_clock_gating)
+static int gaudi_extract_ecc_info(struct hl_device *hdev,
+ struct ecc_info_extract_params *params, u64 *ecc_address,
+ u64 *ecc_syndrom, u8 *memory_wrapper_idx)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- int num_mem_regs = num_memories / 32 + ((num_memories % 32) ? 1 : 0);
+ u32 i, num_mem_regs, reg, err_bit;
+ u64 err_addr, err_word = 0;
+ int rc = 0;
- if (block_address >= CFG_BASE)
- block_address -= CFG_BASE;
+ num_mem_regs = params->num_memories / 32 +
+ ((params->num_memories % 32) ? 1 : 0);
- if (derr)
- block_address += GAUDI_ECC_DERR0_OFFSET;
+ if (params->block_address >= CFG_BASE)
+ params->block_address -= CFG_BASE;
+
+ if (params->derr)
+ err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
else
- block_address += GAUDI_ECC_SERR0_OFFSET;
+ err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;
- if (disable_clock_gating) {
+ if (params->disable_clock_gating) {
mutex_lock(&gaudi->clk_gate_mutex);
hdev->asic_funcs->disable_clock_gating(hdev);
}
- switch (num_mem_regs) {
- case 1:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x\n",
- block_name, RREG32(block_address));
- break;
- case 2:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4));
- break;
- case 3:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8));
- break;
- case 4:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8), RREG32(block_address + 0xc));
- break;
- default:
- break;
+ /* Set invalid wrapper index */
+ *memory_wrapper_idx = 0xFF;
+ /* Iterate through memory wrappers, a single bit must be set */
+ for (i = 0 ; i < num_mem_regs ; i++) {
+ err_addr += i * 4;
+ err_word = RREG32(err_addr);
+ if (err_word) {
+ err_bit = __ffs(err_word);
+ *memory_wrapper_idx = err_bit + (32 * i);
+ break;
+ }
}
- if (disable_clock_gating) {
- hdev->asic_funcs->enable_clock_gating(hdev);
+ if (*memory_wrapper_idx == 0xFF) {
+ dev_err(hdev->dev, "ECC error information cannot be found\n");
+ rc = -EINVAL;
+ goto enable_clk_gate;
+ }
+
+ WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
+ *memory_wrapper_idx);
+
+ *ecc_address =
+ RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
+ *ecc_syndrom =
+ RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);
+
+ /* Clear error indication */
+ reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
+ if (params->derr)
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
+ else
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);
+
+ WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);
+
+enable_clk_gate:
+ if (params->disable_clock_gating) {
+ hdev->asic_funcs->set_clock_gating(hdev);
+
mutex_unlock(&gaudi->clk_gate_mutex);
}
+
+ return rc;
}
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
@@ -5286,239 +5308,99 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
}
-static void gaudi_print_ecc_info(struct hl_device *hdev, u16 event_type)
+static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_ecc_data *ecc_data)
{
- u64 block_address;
- u8 index;
- int num_memories;
- char desc[32];
- bool derr;
- bool disable_clock_gating;
+ struct ecc_info_extract_params params;
+ u64 ecc_address = 0, ecc_syndrom = 0;
+ u8 index, memory_wrapper_idx = 0;
+ bool extract_info_from_fw;
+ int rc;
switch (event_type) {
- case GAUDI_EVENT_PCIE_CORE_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_CORE_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
+ case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
+ extract_info_from_fw = true;
break;
case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
index = event_type - GAUDI_EVENT_TPC0_SERR;
- block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ params.num_memories = 90;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
index = event_type - GAUDI_EVENT_TPC0_DERR;
- block_address =
+ params.block_address =
mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = true;
- disable_clock_gating = true;
+ params.num_memories = 90;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_SERR:
case GAUDI_EVENT_MME1_ACC_SERR:
case GAUDI_EVENT_MME2_ACC_SERR:
case GAUDI_EVENT_MME3_ACC_SERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_DERR:
case GAUDI_EVENT_MME1_ACC_DERR:
case GAUDI_EVENT_MME2_ACC_DERR:
case GAUDI_EVENT_MME3_ACC_DERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = true;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_SERR:
case GAUDI_EVENT_MME1_SBAB_SERR:
case GAUDI_EVENT_MME2_SBAB_SERR:
case GAUDI_EVENT_MME3_SBAB_SERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = false;
- disable_clock_gating = true;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_DERR:
case GAUDI_EVENT_MME1_SBAB_DERR:
case GAUDI_EVENT_MME2_SBAB_DERR:
case GAUDI_EVENT_MME3_SBAB_DERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = true;
- disable_clock_gating = true;
- break;
- case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_SERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_DERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_SERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_DERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_SERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_DERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
- index = event_type - GAUDI_EVENT_SRAM0_SERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
- index = event_type - GAUDI_EVENT_SRAM0_DERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_SERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- num_memories = 60;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_DERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- derr = true;
- num_memories = 60;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = false;
- num_memories = 64;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = true;
- num_memories = 64;
- disable_clock_gating = false;
- break;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
+ break;
default:
return;
}
- gaudi_print_ecc_info_generic(hdev, desc, block_address, num_memories,
- derr, disable_clock_gating);
+ if (extract_info_from_fw) {
+ ecc_address = le64_to_cpu(ecc_data->ecc_address);
+ ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
+ memory_wrapper_idx = ecc_data->memory_wrapper_idx;
+ } else {
+ rc = gaudi_extract_ecc_info(hdev, &params, &ecc_address,
+ &ecc_syndrom, &memory_wrapper_idx);
+ if (rc)
+ return;
+ }
+
+ dev_err(hdev->dev,
+ "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
+ ecc_address, ecc_syndrom, memory_wrapper_idx);
}
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
@@ -5568,8 +5450,6 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- gaudi_print_ecc_info(hdev, event_type);
-
if (razwi) {
gaudi_print_razwi_info(hdev);
gaudi_print_mmu_error_info(hdev);
@@ -5718,7 +5598,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -5799,10 +5679,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- fallthrough;
- case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
@@ -5898,6 +5783,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough;
case GAUDI_EVENT_MMU_SERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
case GAUDI_EVENT_PCIE_DEC:
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
@@ -5994,6 +5884,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
mutex_lock(&hdev->mmu_cache_lock);
/* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_PS, 3);
+ WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
WREG32(mmSTLB_INV_PS, 2);
rc = hl_poll_timeout(
@@ -6232,7 +6124,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
if (s)
seq_puts(s, "\n");
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6333,7 +6225,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
return -EIO;
}
@@ -6362,7 +6254,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
- hdev->asic_funcs->enable_clock_gating(hdev);
+ hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
if (rc) {
@@ -6380,47 +6272,14 @@ static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
-{
- return gaudi_cq_assignment[cq_idx];
-}
-
-static void gaudi_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+static int gaudi_ctx_init(struct hl_ctx *ctx)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
- struct hl_hw_sob *hw_sob;
- int sob, ext_idx = gaudi->ext_queue_idx++;
-
- /*
- * The external queues might not sit sequentially, hence use the
- * real external queue index for the SOB/MON base id.
- */
- hw_queue->base_sob_id = ext_idx * HL_RSVD_SOBS;
- hw_queue->base_mon_id = ext_idx * HL_RSVD_MONS;
- hw_queue->next_sob_val = 1;
- hw_queue->curr_sob_offset = 0;
-
- for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
- hw_sob = &hw_queue->hw_sob[sob];
- hw_sob->hdev = hdev;
- hw_sob->sob_id = hw_queue->base_sob_id + sob;
- hw_sob->q_idx = q_idx;
- kref_init(&hw_sob->kref);
- }
+ return 0;
}
-static void gaudi_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
-
- /*
- * In case we got here due to a stuck CS, the refcnt might be bigger
- * than 1 and therefore we reset it.
- */
- kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
- hw_queue->curr_sob_offset = 0;
- hw_queue->next_sob_val = 1;
+ return gaudi_cq_assignment[cq_idx];
}
static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
@@ -6445,16 +6304,17 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
memset(pkt, 0, sizeof(*pkt));
- value = 1 << GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT; /* inc by 1 */
- value |= 1 << GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT; /* add mode */
+ /* Inc by 1, Mode ADD */
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
- ctl = (sob_id * 4) << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT; /* SOB id */
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 3 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S SOB base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6467,12 +6327,12 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
memset(pkt, 0, pkt_size);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_MB_SHIFT; /* only last pkt needs MB */
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6488,18 +6348,19 @@ static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
memset(pkt, 0, pkt_size);
- value = (sob_id / 8) << GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT;
- value |= sob_val << GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT;
- value |= 0 << GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT; /* GREATER_OR_EQUAL */
- value |= mask << GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT;
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
+ 0); /* GREATER OR EQUAL*/
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6513,15 +6374,14 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
memset(pkt, 0, pkt_size);
- cfg = 1 << GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT;
- cfg |= 1 << GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT;
- cfg |= 2 << GAUDI_PKT_FENCE_CFG_ID_SHIFT;
+ cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
- ctl = 0 << GAUDI_PKT_FENCE_CTL_PRED_SHIFT;
- ctl |= PACKET_FENCE << GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_FENCE_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
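The hunks above convert the open-coded shift-and-OR packing of the message-short and fence packet words to FIELD_PREP() against the corresponding *_MASK definitions, so the shift amount is derived from the mask itself. A minimal userspace sketch of the idea; the simplified FIELD_PREP() and the mask values below are stand-ins for illustration, not the kernel's <linux/bitfield.h> macro or the real GAUDI layout (the kernel macro additionally adds compile-time checks that constant values fit the field):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift the value up
 * to the lowest set bit of the mask, then keep only the masked bits.
 */
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* Hypothetical masks, only mirroring the style of the GAUDI_PKT_* layout */
#define CTL_ADDR_MASK	0x0000FFFFu	/* bits 15:0  */
#define CTL_OPCODE_MASK	0x1F000000u	/* bits 28:24 */
#define CTL_MB_MASK	0x80000000u	/* bit  31    */

int main(void)
{
	uint32_t ctl;

	/* Equivalent to: (0x40 << 0) | (0x0C << 24) | (1 << 31) */
	ctl = FIELD_PREP(CTL_ADDR_MASK, 0x40);
	ctl |= FIELD_PREP(CTL_OPCODE_MASK, 0x0C);
	ctl |= FIELD_PREP(CTL_MB_MASK, 1);

	printf("ctl = 0x%08x\n", (unsigned int)ctl);	/* 0x8c000040 */
	return 0;
}
```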
@@ -6703,7 +6563,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.send_heartbeat = gaudi_send_heartbeat,
- .enable_clock_gating = gaudi_enable_clock_gating,
+ .set_clock_gating = gaudi_set_clock_gating,
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
@@ -6720,13 +6580,12 @@ static const struct hl_asic_funcs gaudi_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = gaudi_halt_coresight,
+ .ctx_init = gaudi_ctx_init,
.get_clk_rate = gaudi_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.read_device_fw_version = gaudi_read_device_fw_version,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
- .ext_queue_init = gaudi_ext_queue_init,
- .ext_queue_reset = gaudi_ext_queue_reset,
.get_signal_cb_size = gaudi_get_signal_cb_size,
.get_wait_cb_size = gaudi_get_wait_cb_size,
.gen_signal_cb = gaudi_gen_signal_cb,
@@ -6739,7 +6598,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
/**
* gaudi_set_asic_funcs - set GAUDI function pointers
*
- * @*hdev: pointer to hl_device structure
+ * @hdev: pointer to hl_device structure
*
*/
void gaudi_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index a46530d375fa..5dc99f6f0296 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -9,11 +9,11 @@
#define GAUDIP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/gaudi/gaudi_packets.h"
-#include "include/gaudi/gaudi.h"
-#include "include/gaudi/gaudi_async_events.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/gaudi/gaudi_packets.h"
+#include "../include/gaudi/gaudi.h"
+#include "../include/gaudi/gaudi_async_events.h"
#define NUMBER_OF_EXT_HW_QUEUES 12
#define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES
@@ -57,6 +57,12 @@
#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_MAX_PENDING_CS 1024
+
+#if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS)
+#error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
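GAUDI_MAX_PENDING_CS is validated at build time. IS_MAX_PENDING_CS_VALID() itself lives in the common habanalabs headers, which are not part of this diff; going only by the #error text, a sketch of what such a check could look like (the macro body below is an assumption):

```c
#include <assert.h>

/* Assumed shape of the validity check, based only on the #error text:
 * the value must be greater than 1 and a power of two.
 */
#define IS_MAX_PENDING_CS_VALID(n)	((n) > 1 && (((n) & ((n) - 1)) == 0))

#define GAUDI_MAX_PENDING_CS	1024
#define GOYA_MAX_PENDING_CS	64

static_assert(IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS),
	      "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1");
static_assert(IS_MAX_PENDING_CS_VALID(GOYA_MAX_PENDING_CS),
	      "GOYA_MAX_PENDING_CS must be power of 2 and greater than 1");

int main(void) { return 0; }
```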
#define PCI_DMA_NUMBER_OF_CHNLS 3
#define HBM_DMA_NUMBER_OF_CHNLS 5
#define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \
@@ -117,14 +123,14 @@
/* Internal QMANs PQ sizes */
-#define MME_QMAN_LENGTH 64
+#define MME_QMAN_LENGTH 1024
#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define HBM_DMA_QMAN_LENGTH 64
+#define HBM_DMA_QMAN_LENGTH 1024
#define HBM_DMA_QMAN_SIZE_IN_BYTES \
(HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define TPC_QMAN_LENGTH 64
+#define TPC_QMAN_LENGTH 1024
#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
@@ -228,7 +234,8 @@ struct gaudi_internal_qman_info {
* engine.
* @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
- * @ext_queue_idx: helper index for external queues initialization.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ * 8-bit value so use u8.
*/
struct gaudi_device {
int (*armcp_info_get)(struct hl_device *hdev);
@@ -247,7 +254,7 @@ struct gaudi_device {
u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
u32 hw_cap_initialized;
u8 multi_msi_mode;
- u8 ext_queue_idx;
+ u8 mmu_cache_inv_pi;
};
void gaudi_init_security(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index bf0e062d7b87..5673ee49819e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -6,9 +6,9 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_coresight.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
-#include "include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_coresight.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/gaudi_masks.h"
#include <uapi/misc/habanalabs.h>
#include <linux/coresight.h>
@@ -392,6 +392,7 @@ static int gaudi_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -420,7 +421,10 @@ static int gaudi_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
/* SW-2176 - SW WA for HW bug */
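Both STM configuration paths (this Gaudi hunk and the matching Goya hunk further down) now prefer the timestamp frequency the driver derived from the PSOC PLL and fall back to the user-supplied value only when that property is zero. A trivial sketch of the fallback; the helper name is illustrative, not the driver's:

```c
#include <stdint.h>
#include <stdio.h>

/* Prefer the frequency the driver derived from the PSOC PLL; use the
 * user-supplied STM frequency only when that property is still zero.
 */
static uint32_t pick_stm_frequency(uint32_t psoc_timestamp_frequency,
				   uint32_t user_frequency)
{
	return psoc_timestamp_frequency ? psoc_timestamp_frequency
					: user_frequency;
}

int main(void)
{
	printf("%u\n", pick_stm_frequency(0, 400));	/* falls back: 400 */
	printf("%u\n", pick_stm_frequency(500, 400));	/* derived:    500 */
	return 0;
}
```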
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 6dd2c2a1cd70..1076b4932ce2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_fw_if.h"
void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 6a351e31fa6a..8d5d6ddee6ed 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
#define GAUDI_NUMBER_OF_RR_REGS 24
#define GAUDI_NUMBER_OF_LBW_RANGES 12
@@ -447,8 +447,7 @@ static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
* gaudi_set_block_as_protected - set the given block as protected
*
* @hdev: pointer to hl_device structure
- * @block: block base address
- *
+ * @base: block base address
*/
static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
{
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
index bd769083628e..b3f3b7b96683 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
goya/goya_coresight.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 0d2952bb58df..85030759b2af 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -6,10 +6,10 @@
*/
#include "goyaP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_0.h"
-#include "include/goya/asic_reg/goya_masks.h"
-#include "include/goya/goya_reg_map.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_0.h"
+#include "../include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
@@ -88,6 +88,7 @@
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@@ -337,11 +338,19 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-void goya_get_fixed_properties(struct hl_device *hdev)
+int goya_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
+ prop->max_queues = GOYA_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
+
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
@@ -361,9 +370,6 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].requires_kernel_cb = 0;
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -426,6 +432,10 @@ void goya_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+
+ prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+
+ return 0;
}
/*
@@ -456,6 +466,7 @@ static int goya_pci_bars_map(struct hl_device *hdev)
static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -463,7 +474,10 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 1 - Bar 4 - Point to DDR */
- rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = DDR_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
if (rc)
return U64_MAX;
@@ -485,8 +499,35 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
*/
static int goya_init_iatu(struct hl_device *hdev)
{
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = DDR_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
/*
@@ -507,7 +548,11 @@ static int goya_early_init(struct hl_device *hdev)
u32 val;
int rc;
- goya_get_fixed_properties(hdev);
+ rc = goya_get_fixed_properties(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get fixed properties\n");
+ return rc;
+ }
/* Check BAR sizes */
if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -517,7 +562,8 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
@@ -527,14 +573,15 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
MSIX_BAR_ID),
MSIX_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -544,6 +591,10 @@ static int goya_early_init(struct hl_device *hdev)
}
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
/*
@@ -556,6 +607,7 @@ static int goya_early_init(struct hl_device *hdev)
*/
static int goya_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -592,11 +644,36 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
+ u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
+ u32 od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
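goya_fetch_psoc_frequency() now computes the usable trace/timestamp frequency from the PCI PLL registers rather than merely caching them: depending on the divider select it is the raw reference clock, the divided reference clock, the PLL output, or the divided PLL output. A standalone sketch of the same arithmetic; PLL_REF_CLK and the DIV_SEL_* encoding below are assumptions, since the real definitions live in headers not shown in this diff:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values: the real PLL_REF_CLK and DIV_SEL_* encodings live in
 * the habanalabs headers, which are not part of this diff.
 */
#define PLL_REF_CLK	50	/* MHz */
enum {
	DIV_SEL_REF_CLK,
	DIV_SEL_PLL_CLK,
	DIV_SEL_DIVIDED_REF,
	DIV_SEL_DIVIDED_PLL,
};

/* Mirrors the calculation added to goya_fetch_psoc_frequency() */
static uint32_t psoc_trace_freq(uint32_t div_sel, uint32_t div_fctr,
				uint32_t nr, uint32_t nf, uint32_t od)
{
	uint32_t pll_clk;

	if (div_sel == DIV_SEL_REF_CLK)
		return PLL_REF_CLK;
	if (div_sel == DIV_SEL_DIVIDED_REF)
		return PLL_REF_CLK / (div_fctr + 1);

	pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
	if (div_sel == DIV_SEL_PLL_CLK)
		return pll_clk;
	if (div_sel == DIV_SEL_DIVIDED_PLL)
		return pll_clk / (div_fctr + 1);

	return 0;	/* invalid divider select, the driver only warns */
}

int main(void)
{
	/* e.g. nf=31, nr=0, od=1: PLL at 800 MHz, divided by (3 + 1) */
	printf("%u MHz\n", psoc_trace_freq(DIV_SEL_DIVIDED_PLL, 3, 0, 31, 1));
	return 0;
}
```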
int goya_late_init(struct hl_device *hdev)
@@ -2164,29 +2241,15 @@ static void goya_disable_timestamp(struct hl_device *hdev)
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
- }
-
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
goya_stop_external_queues(hdev);
goya_stop_internal_queues(hdev);
@@ -2491,14 +2554,26 @@ disable_queues:
static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct goya_device *goya = hdev->asic_specific;
- u32 reset_timeout_ms, status;
+ u32 reset_timeout_ms, cpu_timeout_ms, status;
- if (hdev->pldm)
+ if (hdev->pldm) {
reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
- else
+ cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ } else {
reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+ }
if (hard_reset) {
+ /* I don't know what the state of the CPU is, so make sure it is
+ * stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+
+ msleep(cpu_timeout_ms);
+
goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
goya_disable_clk_rlx(hdev);
goya_set_pll_refclk(hdev);
@@ -2830,6 +2905,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
+ if (!timeout)
+ timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
+
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@@ -3697,9 +3775,8 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -3771,9 +3848,8 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -3942,8 +4018,7 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
*val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -3999,8 +4074,7 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4044,9 +4118,8 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4088,9 +4161,8 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4431,8 +4503,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
- HL_DEVICE_TIMEOUT_USEC, &result);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4464,8 +4536,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
- rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_DEVICE_TIMEOUT_USEC, &result);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@@ -4623,7 +4695,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
sizeof(struct packet_msg_prot);
- cb = hl_cb_kernel_create(hdev, cb_size);
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
if (!cb)
return -ENOMEM;
@@ -5028,14 +5100,14 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
-static void goya_enable_clock_gating(struct hl_device *hdev)
+static void goya_set_clock_gating(struct hl_device *hdev)
{
-
+ /* clock gating not supported in Goya */
}
static void goya_disable_clock_gating(struct hl_device *hdev)
{
-
+ /* clock gating not supported in Goya */
}
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
@@ -5153,19 +5225,14 @@ static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
-{
- return cq_idx;
-}
-
-static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+static int goya_ctx_init(struct hl_ctx *ctx)
{
-
+ return 0;
}
-static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
-
+ return cq_idx;
}
static u32 goya_get_signal_cb_size(struct hl_device *hdev)
@@ -5259,7 +5326,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
- .enable_clock_gating = goya_enable_clock_gating,
+ .set_clock_gating = goya_set_clock_gating,
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
@@ -5276,13 +5343,12 @@ static const struct hl_asic_funcs goya_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
+ .ctx_init = goya_ctx_init,
.get_clk_rate = goya_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.read_device_fw_version = goya_read_device_fw_version,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
- .ext_queue_init = goya_ext_queue_init,
- .ext_queue_reset = goya_ext_queue_reset,
.get_signal_cb_size = goya_get_signal_cb_size,
.get_wait_cb_size = goya_get_wait_cb_size,
.gen_signal_cb = goya_gen_signal_cb,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index d36f8d90c9c9..bb7474ee9784 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -9,12 +9,12 @@
#define GOYAP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/goya/goya_packets.h"
-#include "include/goya/goya.h"
-#include "include/goya/goya_async_events.h"
-#include "include/goya/goya_fw_if.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/goya/goya_packets.h"
+#include "../include/goya/goya.h"
+#include "../include/goya/goya_async_events.h"
+#include "../include/goya/goya_fw_if.h"
#define NUMBER_OF_CMPLT_QUEUES 5
#define NUMBER_OF_EXT_HW_QUEUES 5
@@ -31,10 +31,6 @@
*/
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
-#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
-#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
-#endif
-
#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
#endif
@@ -57,6 +53,12 @@
#define GOYA_DEFAULT_CARD_NAME "HL1000"
+#define GOYA_MAX_PENDING_CS 64
+
+#if !IS_MAX_PENDING_CS_VALID(GOYA_MAX_PENDING_CS)
+#error "GOYA_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
@@ -164,7 +166,7 @@ struct goya_device {
u8 device_cpu_mmu_mappings_done;
};
-void goya_get_fixed_properties(struct hl_device *hdev);
+int goya_get_fixed_properties(struct hl_device *hdev);
int goya_mmu_init(struct hl_device *hdev);
void goya_init_dma_qmans(struct hl_device *hdev);
void goya_init_mme_qmans(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1258724ea510..b03912483de0 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -6,9 +6,9 @@
*/
#include "goyaP.h"
-#include "include/goya/goya_coresight.h"
-#include "include/goya/asic_reg/goya_regs.h"
-#include "include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_coresight.h"
+#include "../include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -232,6 +232,7 @@ static int goya_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -264,7 +265,10 @@ static int goya_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE20, 0xFFFFFFFF);
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
@@ -640,7 +644,6 @@ static int goya_config_spmu(struct hl_device *hdev,
int goya_debug_coresight(struct hl_device *hdev, void *data)
{
struct hl_debug_params *params = data;
- u32 val;
int rc = 0;
switch (params->op) {
@@ -672,7 +675,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index de8297001fea..14701836f92b 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -6,7 +6,7 @@
*/
#include "goyaP.h"
-#include "include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_regs.h"
/*
* goya_set_block_as_protected - set the given block as protected
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/common/armcp_if.h
index a34fc39ad87e..07f9972db28d 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/common/armcp_if.h
@@ -19,9 +19,19 @@ struct hl_eq_header {
__le32 ctl;
};
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
- __le64 data[7];
+ union {
+ struct hl_eq_ecc_data ecc_data;
+ __le64 data[7];
+ };
};
#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
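struct hl_eq_entry keeps its size: the new struct hl_eq_ecc_data is overlaid on the existing 7 x 64-bit payload through an anonymous union, giving ECC events a typed view of the same bytes. A small host-side check of that invariant, with the field layout copied from the hunk and the __le64/__u8 types replaced by plain fixed-width ones:

```c
#include <stdint.h>
#include <assert.h>

/* Field layout copied from the hunk above, host-typed for a size check. */
struct hl_eq_ecc_data {
	uint64_t ecc_address;
	uint64_t ecc_syndrom;
	uint8_t  memory_wrapper_idx;
	uint8_t  pad[7];
};

/* The typed ECC view is overlaid on the existing 7 x 64-bit payload via
 * an anonymous union, so HL_EQ_ENTRY_SIZE is unchanged.
 */
static_assert(sizeof(struct hl_eq_ecc_data) <= 7 * sizeof(uint64_t),
	      "ECC view must fit inside the raw EQ payload");

int main(void) { return 0; }
```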
@@ -276,6 +286,8 @@ struct armcp_packet {
/* For get Armcp info/EEPROM data */
__le32 data_max_size;
};
+
+ __le32 reserved;
};
struct armcp_unmask_irq_arr_packet {
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index c22d134e73af..bb67cafc6e00 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -44,6 +44,15 @@
* The NIC FW loading and initialization
* failed. This means NICs are not usable.
*
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -57,6 +66,8 @@
#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -79,7 +90,10 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
/* Last boot loader progress status, ready to receive commands */
CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
};
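The boot interface gains security reporting in two places: error bits 7 and 8 flag a security initialization that is still in progress or has failed, and boot status 17 signals that security init completed and the device may be accessed. A hedged sketch of how such error bits might be decoded once the firmware sets the ENABLED bit; the helper below is illustrative, not the driver's:

```c
#include <stdint.h>
#include <stdio.h>

#define CPU_BOOT_ERR0_SECURITY_NOT_RDY	(1u << 7)
#define CPU_BOOT_ERR0_SECURITY_FAIL	(1u << 8)
#define CPU_BOOT_ERR0_ENABLED		(1u << 31)

/* Illustrative decode only: trust the error bits once the firmware has
 * set ENABLED, then report the two new security conditions.
 */
static void report_boot_err0(uint32_t err0)
{
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return;		/* error register not populated yet */

	if (err0 & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
		printf("device security init not ready yet\n");
	if (err0 & CPU_BOOT_ERR0_SECURITY_FAIL)
		printf("device security init failed\n");
}

int main(void)
{
	report_boot_err0(CPU_BOOT_ERR0_ENABLED | CPU_BOOT_ERR0_SECURITY_FAIL);
	return 0;
}
```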
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/common/qman_if.h
index 0fdb49188ed7..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/common/qman_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 85e3b5148595..f92dc53af074 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -91,18 +91,16 @@
#include "psoc_pci_pll_regs.h"
#include "psoc_hbm_pll_regs.h"
+#include "psoc_cpu_pll_regs.h"
-#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
-#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
-#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
-#define GAUDI_ECC_SERR0_OFFSET 0xF30
-#define GAUDI_ECC_SERR1_OFFSET 0xF34
-#define GAUDI_ECC_SERR2_OFFSET 0xF38
-#define GAUDI_ECC_SERR3_OFFSET 0xF3C
-#define GAUDI_ECC_DERR0_OFFSET 0xF40
-#define GAUDI_ECC_DERR1_OFFSET 0xF44
-#define GAUDI_ECC_DERR2_OFFSET 0xF48
-#define GAUDI_ECC_DERR3_OFFSET 0xF4C
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_MEM_INFO_CLR_OFFSET 0xF28
+#define GAUDI_ECC_MEM_INFO_CLR_SERR_MASK BIT(8)
+#define GAUDI_ECC_MEM_INFO_CLR_DERR_MASK BIT(9)
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
@@ -294,6 +292,7 @@
#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
new file mode 100644
index 000000000000..2585c70f59ef
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_CPU_PLL_REGS_H_
+#define ASIC_REG_PSOC_CPU_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_CPU_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_CPU_PLL_NR 0xC70100
+
+#define mmPSOC_CPU_PLL_NF 0xC70104
+
+#define mmPSOC_CPU_PLL_OD 0xC70108
+
+#define mmPSOC_CPU_PLL_NB 0xC7010C
+
+#define mmPSOC_CPU_PLL_CFG 0xC70110
+
+#define mmPSOC_CPU_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_CPU_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_CPU_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_CPU_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_CPU_PLL_RST 0xC70134
+
+#define mmPSOC_CPU_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_CPU_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_CPU_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_CPU_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_CPU_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_CPU_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_CPU_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_CPU_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_CPU_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_CPU_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_CPU_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_CPU_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_CPU_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_CPU_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_CPU_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_CPU_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_CPU_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_CPU_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_CPU_PLL_FREQ_CALC_EN 0xC70440
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_CFG 0xC70500
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_0 0xC70510
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_1 0xC70514
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_2 0xC70518
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_3 0xC7051C
+
+#endif /* ASIC_REG_PSOC_CPU_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 96f08050ef0f..13ef6b2887fd 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -455,4 +455,7 @@ enum axi_id {
QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
+#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+
#endif /* GAUDI_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index 9a5800b0086b..f30f2c0458d7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -85,7 +85,7 @@ struct packet_msg_long {
};
#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
-#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x0000EFFF
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
@@ -141,7 +141,7 @@ struct packet_msg_prot {
#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30
-#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC000000
+#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000
#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
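The two mask corrections matter because these fields are now packed with FIELD_PREP(): each mask has to be a contiguous run of bits consistent with its *_SHIFT, otherwise values are truncated or land at the wrong offset. A small check relating the corrected masks to their bit ranges; the GENMASK() stand-in below mimics the kernel macro for a host build:

```c
#include <stdint.h>
#include <assert.h>

/* Host-side stand-in for the kernel's GENMASK(h, l): a contiguous mask
 * covering bits h down to l.
 */
#define GENMASK(h, l) \
	((uint32_t)(((1ull << ((h) - (l) + 1)) - 1) << (l)))

/* The corrected masks correspond to contiguous fields that match their
 * *_SHIFT definitions (0 and 30 respectively).
 */
static_assert(GENMASK(14, 0)  == 0x00007FFF, "SOB sync value: bits 14:0");
static_assert(GENMASK(31, 30) == 0xC0000000, "fence ID: bits 31:30");

/* The old values were not valid fields at those positions: 0x0000EFFF
 * has a hole at bit 12 (plus a stray bit 15), and 0x0C000000 sits at
 * bits 27:26 even though the shift is 30.
 */

int main(void) { return 0; }
```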
@@ -197,6 +197,9 @@ struct packet_wait {
__le32 ctl;
};
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001
+
struct packet_load_and_exe {
__le32 cfg;
__le32 ctl;
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 927309b86bab..fea3ae9d8686 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -207,7 +207,7 @@ static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
static inline int fifo_sz(int nr_entry)
{
/* size of a fifo is determined by the number of entries it contains */
- return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
+ return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}
static void fifo_setup(void *base_addr, int nr_entry)
@@ -256,7 +256,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
memset_io(device_ccb, 0, sizeof(struct ccb));
/* free resources used to back send/recv queues */
- pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
+ dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
+ data->dma_pa);
}
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
@@ -272,16 +273,14 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
2 * desc_mem_sz(NR_QENTRY) +
ILO_START_ALIGN + ILO_CACHE_SZ;
- data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
- &data->dma_pa);
+ data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
+ &data->dma_pa, GFP_ATOMIC);
if (!data->dma_va)
return -ENOMEM;
dma_va = (char *)data->dma_va;
dma_pa = data->dma_pa;
- memset(dma_va, 0, data->dma_size);
-
dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
dma_pa = roundup(dma_pa, ILO_START_ALIGN);
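The hpilo CCB buffers move from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API; dma_alloc_coherent() returns zeroed memory, which is why the explicit memset() of the buffer is dropped. A sketch of the replacement pattern only, not the driver's exact code; the helper names are hypothetical, and GFP_ATOMIC is kept as in the hunk although that choice is driver specific:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helpers showing the modern allocation/free pairing.
 * dma_alloc_coherent() hands back zeroed memory, so no memset is needed.
 */
static void *ilo_alloc_dma(struct pci_dev *pdev, size_t size,
			   dma_addr_t *dma_pa)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_pa, GFP_ATOMIC);
}

static void ilo_free_dma(struct pci_dev *pdev, size_t size,
			 void *dma_va, dma_addr_t dma_pa)
{
	dma_free_coherent(&pdev->dev, size, dma_va, dma_pa);
}
```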
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 1aa433a7f66c..f69ff645cac9 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -160,23 +160,23 @@ struct ccb_data {
#define ILO_START_ALIGN 4096
#define ILO_CACHE_SZ 128
struct fifo {
- u64 nrents; /* user requested number of fifo entries */
- u64 imask; /* mask to extract valid fifo index */
- u64 merge; /* O/C bits to merge in during enqueue operation */
- u64 reset; /* set to non-zero when the target device resets */
- u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
+ u64 nrents; /* user requested number of fifo entries */
+ u64 imask; /* mask to extract valid fifo index */
+ u64 merge; /* O/C bits to merge in during enqueue operation */
+ u64 reset; /* set to non-zero when the target device resets */
+ u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
- u64 head;
- u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 head;
+ u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
- u64 tail;
- u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 tail;
+ u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
- u64 fifobar[1];
+ u64 fifobar[];
};
/* convert between struct fifo, and the fifobar, which is saved in the ccb */
-#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64))
+#define FIFOHANDLESIZE (sizeof(struct fifo))
#define FIFOBARTOHANDLE(_fifo) \
((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
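struct fifo now ends in a C99 flexible array member instead of a one-element array, so FIFOHANDLESIZE can simply be sizeof(struct fifo): the flexible member contributes nothing to the size, and the old "- sizeof(u64)" correction is no longer needed. A simplified sketch of the sizing relationship (the real struct pads each group out to a cache line, omitted here):

```c
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

/* Simplified: the real struct fifo pads each group to ILO_CACHE_SZ. */
struct fifo {
	uint64_t nrents;
	uint64_t head;
	uint64_t tail;
	uint64_t fifobar[];	/* flexible array member */
};

#define FIFOHANDLESIZE	(sizeof(struct fifo))

int main(void)
{
	int nr_entry = 64;
	struct fifo *f;

	/* The flexible member adds nothing to sizeof(struct fifo), so the
	 * old correction for fifobar[1] is gone.
	 */
	static_assert(sizeof(struct fifo) == 3 * sizeof(uint64_t),
		      "flexible array member contributes no size");

	/* Same shape as fifo_sz(): entries plus the fixed handle. */
	f = malloc(nr_entry * sizeof(uint64_t) + FIFOHANDLESIZE);
	if (!f)
		return 1;
	f->nrents = nr_entry;

	free(f);
	return 0;
}
```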
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 2863657fa268..733dd30fbacc 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -94,7 +94,7 @@ static inline void do_exec_command(struct service_processor *sp)
}
}
-/**
+/*
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
@@ -140,7 +140,7 @@ static void exec_next_command(struct service_processor *sp)
}
}
-/**
+/*
* Sleep until a command has failed or a response has been received
* and the command status been updated by the interrupt handler.
* (see receive_response).
@@ -153,7 +153,7 @@ void ibmasm_wait_for_response(struct command *cmd, int timeout)
timeout * HZ);
}
-/**
+/*
* receive_command_response
* called by the interrupt handler when a dot command of type command_response
* was received.
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 70273a4cb352..df389bd4c9df 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -10,7 +10,7 @@
#include "ibmasm.h"
#include "dot_command.h"
-/**
+/*
* Dispatch an incoming message to the specific handler for the message.
* Called from interrupt context.
*/
@@ -48,7 +48,7 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
#define INIT_BUFFER_SIZE 32
-/**
+/*
* send the 4.3.5.10 dot command (driver VPD) to the service processor
*/
int ibmasm_send_driver_vpd(struct service_processor *sp)
@@ -99,7 +99,7 @@ struct os_state_command {
unsigned char data;
};
-/**
+/*
* send the 4.3.6 dot command (os state) to the service processor
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 974d63f5a4dd..40ce75f8970c 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -31,7 +31,7 @@ static void wake_up_event_readers(struct service_processor *sp)
wake_up_interruptible(&reader->wait);
}
-/**
+/*
* receive_event
* Called by the interrupt handler when a dot command of type sp_event is
* received.
@@ -68,7 +68,7 @@ static inline int event_available(struct event_buffer *b, struct event_reader *r
return (r->next_serial_number < b->next_serial_number);
}
-/**
+/*
* get_next_event
* Called by event readers (initiated from user space through the file
* system).
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 6567df638ea9..21c9b6a6f2c3 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -39,7 +39,7 @@ void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_
rhb->stopped = 0;
}
-/**
+/*
* start_reverse_heartbeat
* Loop forever, sending a reverse heartbeat dot command to the service
* processor, then sleeping. The loop comes to an end if the service
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 2ed23c99f59f..c0d139c26505 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -760,7 +760,7 @@ static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
* @adapter: crq_server_adapter struct
* @buffer: ibmvmc_buffer struct
* @hmc: ibmvmc_hmc struct
- * @msg_length: message length field
+ * @msg_len: message length field
*
* This command is sent between the management partition and the hypervisor
* in order to signal the arrival of an HMC protocol message. The command
@@ -1028,7 +1028,7 @@ static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
* ibmvmc_write - Write
*
* @file: file struct
- * @buf: Character buffer
+ * @buffer: Character buffer
* @count: Count field
* @ppos: Offset
*
@@ -1347,7 +1347,7 @@ static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
/**
* ibmvmc_ioctl - IOCTL
*
- * @session: ibmvmc_file_session struct
+ * @file: file information
* @cmd: cmd field
* @arg: Argument field
*
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 884485c3f723..5eaf74447ca1 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -67,7 +67,6 @@ static void firmware_load(const struct firmware *fw, void *context)
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = spi_get_drvdata(spi);
u8 *buffer;
- int ret;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
@@ -92,7 +91,7 @@ static void firmware_load(const struct firmware *fw, void *context)
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
jedec_id = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
@@ -110,7 +109,7 @@ static void firmware_load(const struct firmware *fw, void *context)
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
@@ -130,20 +129,20 @@ static void firmware_load(const struct firmware *fw, void *context)
memcpy(buffer + 4, fw->data, fw->size);
txbuf[0] = FPGA_CMD_REFRESH;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
if (status == FPGA_STATUS_CLEARED)
break;
@@ -160,13 +159,13 @@ static void firmware_load(const struct firmware *fw, void *context)
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
- ret = spi_write(spi, buffer, fw->size + 8);
+ spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 736675f0a246..4dfbfd51bdf7 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
@@ -118,9 +118,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing char array ...\n");
+ pr_info("Corrupting stack containing char array ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
/* Same as above but will only get a canary with -fstack-protector-strong */
@@ -131,9 +130,8 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
unsigned long *ptr;
} data __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing union ...\n");
+ pr_info("Corrupting stack containing union ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -248,6 +246,7 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
+ pr_err("FAIL: survived array bounds overflow!\n");
}
void lkdtm_CORRUPT_LIST_ADD(void)
@@ -334,7 +333,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
byte = *ptr;
- pr_err("FAIL: accessed page before stack!\n");
+ pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
@@ -348,7 +347,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
byte = *ptr;
- pr_err("FAIL: accessed page after stack!\n");
+ pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
void lkdtm_UNSET_SMEP(void)
@@ -419,7 +418,7 @@ void lkdtm_UNSET_SMEP(void)
void lkdtm_DOUBLE_FAULT(void)
{
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -454,38 +453,42 @@ void lkdtm_DOUBLE_FAULT(void)
#endif
}
-#ifdef CONFIG_ARM64_PTR_AUTH
+#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
- /* Reset the keys of current task */
- ptrauth_thread_init_kernel(current);
- ptrauth_thread_switch_kernel(current);
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+ }
}
+#endif
-#define CORRUPT_PAC_ITERATE 10
noinline void lkdtm_CORRUPT_PAC(void)
{
+#ifdef CONFIG_ARM64
+#define CORRUPT_PAC_ITERATE 10
int i;
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+
if (!system_supports_address_auth()) {
- pr_err("FAIL: arm64 pointer authentication feature not present\n");
+ pr_err("FAIL: CPU lacks pointer authentication feature\n");
return;
}
- pr_info("Change the PAC parameters to force function return failure\n");
+ pr_info("changing PAC parameters to force function return failure...\n");
/*
- * Pac is a hash value computed from input keys, return address and
+ * PAC is a hash value computed from input keys, return address and
* stack pointer. Since the PAC has fewer bits, there is a chance of
* collision, so iterate a few times to reduce the collision probability.
*/
for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
change_pac_parameters();
- pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
-}
-#else /* !CONFIG_ARM64_PTR_AUTH */
-noinline void lkdtm_CORRUPT_PAC(void)
-{
- pr_err("FAIL: arm64 pointer authentication config disabled\n");
-}
+ pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
+#else
+ pr_err("XFAIL: this test is arm64-only\n");
#endif
+}
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 3c5cec85edce..1323bc16f113 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -58,11 +58,12 @@ void lkdtm_READ_AFTER_FREE(void)
int *base, *val, saw;
size_t len = 1024;
/*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
+ * The slub allocator will use either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on configuration. Store in the second word to
+ * avoid running into the freelist.
*/
- size_t offset = (len / sizeof(*base)) / 2;
+ size_t offset = sizeof(*base);
base = kmalloc(len, GFP_KERNEL);
if (!base) {
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 601a2156a0d4..8878538b2c13 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -31,9 +31,7 @@ void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void);
-#endif
void lkdtm_CORRUPT_PAC(void);
/* lkdtm_heap.c */
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 62f76d506f04..2dede2ef658f 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -57,6 +57,7 @@ static noinline void execute_location(void *dst, bool write)
}
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
static void execute_user_location(void *dst)
@@ -75,20 +76,22 @@ static void execute_user_location(void *dst)
return;
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
void lkdtm_WRITE_RO(void)
{
- /* Explicitly cast away "const" for the test. */
- unsigned long *ptr = (unsigned long *)&rodata;
+ /* Explicitly cast away "const" for the test and make volatile. */
+ volatile unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
- unsigned long *ptr = &ro_after_init;
+ volatile unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
@@ -102,19 +105,21 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_KERN(void)
{
size_t size;
- unsigned char *ptr;
+ volatile unsigned char *ptr;
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %px\n", size, ptr);
- memcpy(ptr, (unsigned char *)do_nothing, size);
+ memcpy((void *)ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ pr_err("FAIL: survived bad write\n");
do_overwritten();
}
@@ -193,9 +198,11 @@ void lkdtm_ACCESS_USERSPACE(void)
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
vm_munmap(user_addr, PAGE_SIZE);
}
@@ -203,19 +210,20 @@ void lkdtm_ACCESS_USERSPACE(void)
void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
- unsigned long *ptr = (unsigned long *)NULL;
+ volatile unsigned long *ptr = (unsigned long *)NULL;
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
-
}
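The perms.c changes share one pattern: the pointer used for each deliberately bad access is volatile-qualified so the compiler cannot optimize the access away, and a "FAIL: survived ..." message follows the access so a missing fault is reported instead of passing silently. A userspace illustration of the volatile part only; here the write succeeds, so the FAIL line always prints, whereas in the driver it should be unreachable:

```c
#include <stdio.h>

static unsigned long target = 0x1234;

/* A volatile-qualified pointer forces the compiler to emit the access
 * even though the result is unused, which is the whole point of these
 * fault-injection probes. The FAIL message only runs if the expected
 * fault never arrives.
 */
static void write_probe(void)
{
	volatile unsigned long *ptr = &target;

	printf("attempting bad write at %p\n", (void *)ptr);
	*ptr ^= 0xabcd1234;
	printf("FAIL: survived bad write\n");
}

int main(void)
{
	write_probe();
	return 0;
}
```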
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index e172719dd86d..b833367a45d0 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -304,19 +304,22 @@ void lkdtm_USERCOPY_KERNEL(void)
return;
}
- pr_info("attempting good copy_to_user from kernel rodata\n");
+ pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+ test_text);
if (copy_to_user((void __user *)user_addr, test_text,
unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
- pr_info("attempting bad copy_to_user from kernel text\n");
+ pr_info("attempting bad copy_to_user from kernel text: %px\n",
+ vm_mmap);
if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
+ pr_err("FAIL: survived bad copy_to_user()\n");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 9d7b3719bfa0..f5fd5b786607 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -9,7 +9,7 @@ config INTEL_MEI
if selected /dev/mei misc device will be created.
For more information see
- <http://software.intel.com/en-us/manageability/>
+ <https://software.intel.com/en-us/manageability/>
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 910f059b3384..07ba16d46690 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -159,17 +159,17 @@ static int mei_osver(struct mei_cl_device *cldev)
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
- struct mkhi_msg *req;
+ struct mkhi_msg req;
+ struct mkhi_msg *rsp;
struct mkhi_fw_ver *fwver;
int bytes_recv, ret, i;
memset(buf, 0, sizeof(buf));
- req = (struct mkhi_msg *)buf;
- req->hdr.group_id = MKHI_GEN_GROUP_ID;
- req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
+ req.hdr.group_id = MKHI_GEN_GROUP_ID;
+ req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
- ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
+ ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req),
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
@@ -188,7 +188,8 @@ static int mei_fwver(struct mei_cl_device *cldev)
return -EIO;
}
- fwver = (struct mkhi_fw_ver *)req->data;
+ rsp = (struct mkhi_msg *)buf;
+ fwver = (struct mkhi_fw_ver *)rsp->data;
memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
@@ -329,16 +330,14 @@ static int mei_nfc_if_version(struct mei_cl *cl,
WARN_ON(mutex_is_locked(&bus->device_lock));
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
- MEI_CL_IO_TX_BLOCKING);
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd\n");
return ret;
}
/* to be sure on the stack we alloc memory */
- if_version_length = sizeof(struct mei_nfc_reply) +
- sizeof(struct mei_nfc_if_version);
+ if_version_length = sizeof(*reply) + sizeof(*ver);
reply = kzalloc(if_version_length, GFP_KERNEL);
if (!reply)
@@ -352,7 +351,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
goto err;
}
- memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+ memcpy(ver, reply->data, sizeof(*ver));
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 8d468e0a950a..a6dfc3ce1db2 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev)
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
- dev->driver = NULL;
- return ret;
+ return ret;
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
@@ -932,7 +931,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_cl_device *cldev;
struct mei_cl *cl;
- cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+ cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
if (!cldev)
return NULL;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index b32c825a0945..2572887d99b6 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -369,7 +369,7 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
{
struct mei_cl_cb *cb;
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -552,7 +552,7 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
*/
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
- memset(cl, 0, sizeof(struct mei_cl));
+ memset(cl, 0, sizeof(*cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
@@ -575,7 +575,7 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return NULL;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a44094cdbc36..308caee86920 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
@@ -257,22 +257,21 @@ int mei_hbm_start_wait(struct mei_device *dev)
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_version_request start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
+ struct hbm_host_version_request req;
int ret;
mei_hbm_reset(dev);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
/* host start message */
- memset(&start_req, 0, len);
- start_req.hbm_cmd = HOST_START_REQ_CMD;
- start_req.host_version.major_version = HBM_MAJOR_VERSION;
- start_req.host_version.minor_version = HBM_MINOR_VERSION;
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_START_REQ_CMD;
+ req.host_version.major_version = HBM_MAJOR_VERSION;
+ req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_hbm_write_message(dev, &mei_hdr, &start_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -295,13 +294,12 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_dma_setup_request req;
- const size_t len = sizeof(struct hbm_dma_setup_request);
unsigned int i;
int ret;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
for (i = 0; i < DMA_DSCR_NUM; i++) {
phys_addr_t paddr;
@@ -337,21 +335,19 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_enum_request enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
+ struct hbm_host_enum_request req;
int ret;
/* enumerate clients */
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&enum_req, 0, len);
- enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req.flags |= dev->hbm_f_dc_supported ?
- MEI_HBM_ENUM_F_ALLOW_ADD : 0;
- enum_req.flags |= dev->hbm_f_ie_supported ?
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_ENUM_REQ_CMD;
+ req.flags |= dev->hbm_f_dc_supported ? MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -380,7 +376,7 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
mei_me_cl_rm_by_uuid(dev, uuid);
- me_cl = kzalloc(sizeof(struct mei_me_client), GFP_KERNEL);
+ me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
if (!me_cl)
return -ENOMEM;
@@ -408,14 +404,13 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
struct mei_msg_hdr mei_hdr;
struct hbm_add_client_response resp;
- const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(resp));
- memset(&resp, 0, sizeof(struct hbm_add_client_response));
+ memset(&resp, 0, sizeof(resp));
resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp.me_addr = addr;
resp.status = status;
@@ -469,11 +464,10 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_msg_hdr mei_hdr;
struct hbm_notification_request req;
- const size_t len = sizeof(struct hbm_notification_request);
int ret;
- mei_hbm_hdr(&mei_hdr, len);
- mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, sizeof(req));
req.start = start;
@@ -580,8 +574,7 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_props_request prop_req;
- const size_t len = sizeof(struct hbm_props_request);
+ struct hbm_props_request req;
unsigned long addr;
int ret;
@@ -591,18 +584,17 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
if (addr == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
mei_host_client_init(dev);
-
return 0;
}
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&prop_req, 0, sizeof(struct hbm_props_request));
+ memset(&req, 0, sizeof(req));
- prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req.me_addr = addr;
+ req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ req.me_addr = addr;
- ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -628,15 +620,14 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
struct mei_msg_hdr mei_hdr;
struct hbm_power_gate req;
- const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = pg_cmd;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
@@ -657,11 +648,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_stop_request req;
- const size_t len = sizeof(struct hbm_host_stop_request);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 4c596c646ac0..d1d3e025ca0e 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -572,7 +572,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in.streams, data->streams,
- (data->k * sizeof(struct hdcp2_streamid_type)));
+ array_size(data->k, sizeof(*data->streams)));
verify_mprime_in.k = cpu_to_be16(data->k);
@@ -852,7 +852,7 @@ static int mei_hdcp_remove(struct mei_cl_device *cldev)
#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
-static struct mei_cl_device_id mei_hdcp_tbl[] = {
+static const struct mei_cl_device_id mei_hdcp_tbl[] = {
{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
{ }
};
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index 18ffc773fa18..834757f5e072 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -358,7 +358,7 @@ struct wired_cmd_repeater_auth_stream_req_in {
u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
u8 m_prime[HDCP_2_2_MPRIME_LEN];
__be16 k;
- struct hdcp2_streamid_type streams[1];
+ struct hdcp2_streamid_type streams[];
} __packed;
struct wired_cmd_repeater_auth_stream_req_out {
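The streams[1] -> streams[] change above turns the trailing array into a true flexible array member, so any message carrying stream entries has to be sized explicitly for the count it holds. A minimal sketch of the usual allocation pattern follows; the local variables are illustrative, while kzalloc(), struct_size() and array_size() are existing helpers from <linux/slab.h> and <linux/overflow.h>, and data->k / data->streams are the fields already used in the mei_hdcp.c hunk above.

	/* Sketch only: size a request for data->k trailing stream entries.
	 * struct_size() computes sizeof(*msg) + data->k * sizeof(msg->streams[0])
	 * with overflow checking; array_size() guards the copy length the same way.
	 */
	struct wired_cmd_repeater_auth_stream_req_in *msg;
	size_t len = struct_size(msg, streams, data->k);

	msg = kzalloc(len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	memcpy(msg->streams, data->streams,
	       array_size(data->k, sizeof(*data->streams)));

A side benefit of dropping the [1] sizing is that sizeof() of the structure now covers only the fixed header, so length calculations such as the array_size() call above stay exact.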
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9392934e3a06..9cf8d8f60cfe 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -59,6 +59,7 @@
#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_3 0x9D3E /* Sunrise Point 3 (iTouch) */
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
@@ -73,6 +74,7 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+#define MEI_DEV_ID_KBP_3 0xA2BE /* Kaby Point 3 (iTouch) */
#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */
@@ -94,6 +96,7 @@
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
@@ -105,8 +108,12 @@
/* Host Firmware Status Registers in PCI Config Space */
#define PCI_CFG_HFS_1 0x40
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
+# define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */
+# define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
+# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
+# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
#define PCI_CFG_HFS_6 0x6C
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f620442addf5..cda0829ac589 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1353,11 +1353,24 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.read = mei_me_read_slots
};
-static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+/**
+ * mei_me_fw_type_nm() - check for nm sku
+ *
+ * Read ME FW Status register to check for the Node Manager (NM) Firmware.
+ * The NM FW is only signaled in PCI function 0.
+ * __Note__: Deprecated by PCH8 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of NM firmware
+ */
+static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
u32 reg;
+ unsigned int devfn;
- pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
@@ -1366,23 +1379,61 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+/**
+ * mei_me_fw_type_sps_4() - check for sps 4.0 sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in PCI function 0.
+ * __Note__: Deprecated by SPS 5.0 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
- /*
- * Read ME FW Status register to check for SPS Firmware
- * The SPS FW is only signaled in pci function 0
- */
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
- /* if bits [19:16] = 15, running SPS Firmware */
- return (reg & 0xf0000) == 0xf0000;
+ return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
+}
+
+#define MEI_CFG_FW_SPS_4 \
+ .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_type_sps() - check for sps sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in PCI function 0.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 fw_type;
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+ fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+ dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+ return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}
-#define MEI_CFG_FW_SPS \
+#define MEI_CFG_KIND_ITOUCH \
+ .kind = "itouch"
+
+#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
#define MEI_CFG_FW_VER_SUPP \
@@ -1451,11 +1502,25 @@ static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_FW_VER_SUPP,
};
+/* PCH8 Lynx Point and newer devices - iTouch */
+static const struct mei_cfg mei_me_pch8_itouch_cfg = {
+ MEI_CFG_KIND_ITOUCH,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_FW_SPS_4,
};
/* Cannon Lake and newer devices */
@@ -1465,10 +1530,21 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake iTouch with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support.
+ */
+static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
+ MEI_CFG_KIND_ITOUCH,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
};
@@ -1480,6 +1556,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
MEI_CFG_TRC,
};
+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+ MEI_CFG_FW_SPS,
+};
+
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1492,10 +1577,14 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
- [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
+ [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+ [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+ [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1523,8 +1612,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(parent, sizeof(struct mei_device) +
- sizeof(struct mei_me_hw), GFP_KERNEL);
+ dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1538,6 +1626,8 @@ struct mei_device *mei_me_dev_init(struct device *parent,
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+ dev->kind = cfg->kind;
+
return dev;
}
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index b6b94e211464..00a7132ac7a2 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -19,13 +19,15 @@
*
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
+ * @kind: MEI head kind
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
* @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
- bool (*quirk_probe)(struct pci_dev *pdev);
+ bool (*quirk_probe)(const struct pci_dev *pdev);
+ const char *kind;
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
u32 hw_trc_supported:1;
@@ -76,14 +78,22 @@ struct mei_me_hw {
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_ITOUCH_CFG: Platform Controller Hub Gen8 and newer
+ * client platforms (iTouch).
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
 * server platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG: Platform Controller Hub Gen12 up to 4.0
+ * server platforms with quirk for
+ * SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
 * server platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
+ * server platforms with quirk for
+ * SPS firmware exclusion.
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -94,10 +104,14 @@ enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
- MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH8_ITOUCH_CFG,
+ MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
+ MEI_ME_PCH12_SPS_ITOUCH_CFG,
MEI_ME_PCH15_CFG,
+ MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 785b260b3ae9..a4e854b9b9e6 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1201,8 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
- sizeof(struct mei_txe_hw), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b1a8d5ec88b3..26fa92cb7f7a 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -319,7 +319,7 @@ struct hbm_props_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
struct mei_client_properties client_properties;
} __packed;
@@ -352,7 +352,7 @@ struct hbm_add_client_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
} __packed;
/**
@@ -461,7 +461,7 @@ struct hbm_notification {
u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
- u8 reserved[1];
+ u8 reserved;
} __packed;
/**
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index f17297f2943d..86ef5c1a7928 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -476,7 +476,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
case IOCTL_MEI_CONNECT_CLIENT:
dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
if (copy_from_user(&connect_data, (char __user *)data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
@@ -488,7 +488,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
/* if all is ok, copying the data back to user. */
if (copy_to_user((char __user *)data, &connect_data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
@@ -885,6 +885,30 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
}
}
+/**
+ * kind_show - display device kind
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or an error code
+ */
+static ssize_t kind_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ ssize_t ret;
+
+ if (dev->kind)
+ ret = sprintf(buf, "%s\n", dev->kind);
+ else
+ ret = sprintf(buf, "%s\n", "mei");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(kind);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
@@ -893,6 +917,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
&dev_attr_trc.attr,
+ &dev_attr_kind.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
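A small aside on the kind_show() handler added above: the fall-back-to-"mei" branch could equally be written with the conditional-operator shorthand common in the kernel. A behaviour-equivalent sketch (not part of the patch):

	static ssize_t kind_show(struct device *device,
				 struct device_attribute *attr, char *buf)
	{
		struct mei_device *dev = dev_get_drvdata(device);

		/* report the configured kind, or "mei" when none was set */
		return sprintf(buf, "%s\n", dev->kind ?: "mei");
	}
	static DEVICE_ATTR_RO(kind);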
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 3a29db07211d..d3a4f54c0ae7 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -445,6 +445,8 @@ struct mei_fw_version {
* @device_list : mei client bus list
* @cl_bus_lock : client bus list lock
*
+ * @kind : kind of mei device
+ *
* @dbgfs_dir : debugfs mei root directory
*
* @ops: : hw specific operations
@@ -528,6 +530,8 @@ struct mei_device {
struct list_head device_list;
struct mutex cl_bus_lock;
+ const char *kind;
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 71f795b510ce..1de9ef7a272b 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -59,18 +59,19 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -81,21 +82,23 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 8f201d019f5a..b9bb086785db 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,6 +4,7 @@ menu "Intel MIC & related support"
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
@@ -19,6 +20,7 @@ config INTEL_MIC_BUS
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
@@ -33,6 +35,7 @@ config SCIF_BUS
config VOP_BUS
tristate "VOP Bus Driver"
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -49,6 +52,7 @@ config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
+ select DMA_OPS
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index b58608829b18..4c326e8f4d99 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -24,7 +24,7 @@
/* Debugfs parent dir */
static struct dentry *mic_dbg;
-/**
+/*
* mic_intr_show - Send interrupts to host.
*/
static int mic_intr_show(struct seq_file *s, void *unused)
@@ -46,7 +46,7 @@ static int mic_intr_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(mic_intr);
-/**
+/*
* mic_create_card_debug_dir - Initialize MIC debugfs entries.
*/
void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
@@ -60,7 +60,7 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
&mic_intr_fops);
}
-/**
+/*
* mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_card_debug_dir(struct mic_driver *mdrv)
@@ -68,7 +68,7 @@ void mic_delete_card_debug_dir(struct mic_driver *mdrv)
debugfs_remove_recursive(mdrv->dbg_dir);
}
-/**
+/*
* mic_init_card_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_card_debugfs(void)
@@ -76,7 +76,7 @@ void __init mic_init_card_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_card_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_card_debugfs(void)
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 68a731fd86de..cb55653cf1f9 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -15,7 +15,7 @@
/* Debugfs parent dir */
static struct dentry *cosm_dbg;
-/**
+/*
* log_buf_show - Display MIC kernel log buffer
*
* log_buf addr/len is read from System.map by user space
@@ -68,7 +68,7 @@ done:
DEFINE_SHOW_ATTRIBUTE(log_buf);
-/**
+/*
* force_reset_show - Force MIC reset
*
* Invokes the force_reset COSM bus op instead of the standard reset
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index f9133c4f6105..ebb0eac43754 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -26,6 +26,7 @@ static atomic_t g_num_dev;
/**
* cosm_hw_reset - Issue a HW reset for the MIC device
* @cdev: pointer to cosm_device instance
+ * @force: force a MIC to reset even if it is already reset and ready
*/
static void cosm_hw_reset(struct cosm_device *cdev, bool force)
{
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index ab0db7a2ac8c..ffda740e20d5 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -101,7 +101,7 @@ static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
-/**
+/*
* mic_create_debug_dir - Initialize MIC debugfs entries.
*/
void mic_create_debug_dir(struct mic_device *mdev)
@@ -124,7 +124,7 @@ void mic_create_debug_dir(struct mic_device *mdev)
&mic_msi_irq_info_fops);
}
-/**
+/*
* mic_delete_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_debug_dir(struct mic_device *mdev)
@@ -132,7 +132,7 @@ void mic_delete_debug_dir(struct mic_device *mdev)
debugfs_remove_recursive(mdev->dbg_dir);
}
-/**
+/*
* mic_init_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_debugfs(void)
@@ -140,7 +140,7 @@ void __init mic_init_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_debugfs(void)
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index 433d35dc1721..85b3221b5d40 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -37,6 +37,8 @@ static irqreturn_t mic_thread_fn(int irq, void *dev)
/**
* mic_interrupt - Generic interrupt handler for
* MSI and INTx based interrupts.
+ * @irq: interrupt to handle (unused)
+ * @dev: pointer to the mic_device instance
*/
static irqreturn_t mic_interrupt(int irq, void *dev)
{
@@ -180,7 +182,7 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
* mic_setup_msix - Initializes MSIx interrupts.
*
* @mdev: pointer to mic_device instance
- *
+ * @pdev: PCI device structure
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index be0784fd1635..ea4608527ea0 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -164,7 +164,6 @@ static int mic_probe(struct pci_dev *pdev,
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
rc = -ENOMEM;
- dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc);
goto mdev_alloc_fail;
}
mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index d18cda966912..f5536c1ad607 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -17,6 +17,15 @@
#include "mic_x100.h"
#include "mic_smpt.h"
+static const u16 mic_x100_intr_init[] = {
+ MIC_X100_DOORBELL_IDX_START,
+ MIC_X100_DMA_IDX_START,
+ MIC_X100_ERR_IDX_START,
+ MIC_X100_NUM_DOORBELL,
+ MIC_X100_NUM_DMA,
+ MIC_X100_NUM_ERR,
+};
+
/**
* mic_x100_write_spad - write to the scratchpad register
* @mdev: pointer to mic_device instance
@@ -112,6 +121,7 @@ static void mic_x100_disable_interrupts(struct mic_device *mdev)
/**
* mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_sbox_intr(struct mic_device *mdev,
int doorbell)
@@ -133,6 +143,7 @@ static void mic_x100_send_sbox_intr(struct mic_device *mdev,
/**
* mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
int doorbell)
@@ -494,6 +505,8 @@ static u32 mic_x100_get_postcode(struct mic_device *mdev)
/**
* mic_x100_smpt_set - Update an SMPT entry with a DMA address.
* @mdev: pointer to mic_device instance
+ * @dma_addr: DMA address to use
+ * @index: entry to write to
*
* RETURNS: none.
*/
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
index 1f727a6f609c..aebcaed6fa72 100644
--- a/drivers/misc/mic/host/mic_x100.h
+++ b/drivers/misc/mic/host/mic_x100.h
@@ -67,15 +67,6 @@
#define MIC_X100_FW_SIZE 5
#define MIC_X100_POSTCODE 0x242c
-static const u16 mic_x100_intr_init[] = {
- MIC_X100_DOORBELL_IDX_START,
- MIC_X100_DMA_IDX_START,
- MIC_X100_ERR_IDX_START,
- MIC_X100_NUM_DOORBELL,
- MIC_X100_NUM_DMA,
- MIC_X100_NUM_ERR,
-};
-
/* Host->Card(bootstrap) Interrupt Vector */
#define MIC_X100_BSP_INTERRUPT_VECTOR 229
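Moving mic_x100_intr_init out of mic_x100.h (see the mic_x100.c hunk above) means the table is no longer duplicated in every translation unit that includes the header, which is also what produced the defined-but-unused warnings. Had several files genuinely needed the table, the conventional alternative would be a declaration in the header with a single definition in one .c file; a sketch of that pattern for comparison (not what this patch does):

	/* mic_x100.h: declaration only */
	extern const u16 mic_x100_intr_init[6];

	/* mic_x100.c: the one shared definition */
	const u16 mic_x100_intr_init[6] = {
		MIC_X100_DOORBELL_IDX_START,
		MIC_X100_DMA_IDX_START,
		MIC_X100_ERR_IDX_START,
		MIC_X100_NUM_DOORBELL,
		MIC_X100_NUM_DMA,
		MIC_X100_NUM_ERR,
	};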
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 781217c030a6..9cc6b2a6cf22 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -713,7 +713,7 @@ int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
}
EXPORT_SYMBOL_GPL(scif_connect);
-/**
+/*
* scif_accept() - Accept a connection request from the remote node
*
* The function accepts a connection request from the remote node. Successful
@@ -997,7 +997,6 @@ static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
- int read_size;
struct scif_endpt *ep = (struct scif_endpt *)epd;
struct scifmsg notif_msg;
int curr_recv_len = 0, remaining_len = len, read_count;
@@ -1017,8 +1016,7 @@ static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
* important for the Non Blocking case.
*/
curr_recv_len = min(remaining_len, read_count);
- read_size = scif_rb_get_next(&qp->inbound_q,
- msg, curr_recv_len);
+ scif_rb_get_next(&qp->inbound_q, msg, curr_recv_len);
if (ep->state == SCIFEP_CONNECTED) {
/*
* Update the read pointer only if the endpoint
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index c7c873409184..401b98e5ad79 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -99,7 +99,7 @@ int scif_reserve_dma_chan(struct scif_endpt *ep)
}
#ifdef CONFIG_MMU_NOTIFIER
-/**
+/*
* scif_rma_destroy_tcw:
*
* This routine destroys temporary cached windows
@@ -332,6 +332,7 @@ static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
* @epd: End Point Descriptor.
* @addr: virtual address to/from which to copy
* @len: length of range to copy
+ * @prot: read/write protection
* @out_offset: computed offset returned by reference.
* @out_window: allocated registered window returned by reference.
*
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
index 590baca9dc7b..426687f6696b 100644
--- a/drivers/misc/mic/scif/scif_epd.c
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -104,6 +104,7 @@ void scif_cleanup_zombie_epd(void)
/**
* scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* This message is initiated by the remote node to request a connection
@@ -155,6 +156,7 @@ conreq_sendrej:
/**
* scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* An accept() on the remote node has occurred and sent this message
@@ -181,6 +183,7 @@ void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request has finished mapping the local memory.
@@ -203,6 +206,7 @@ void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request failed to map the local memory it was sent.
@@ -221,6 +225,7 @@ void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote end has rejected the connection request. Set the end
@@ -240,6 +245,7 @@ void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_discnct() - Respond to SCIF_DISCNCT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote node has indicated close() has been called on its end
@@ -301,6 +307,7 @@ discnct_ack:
/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
 * Remote side has indicated it has no more references to local resources
@@ -317,6 +324,7 @@ void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
@@ -333,6 +341,7 @@ void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index 657fd4a20656..4fedf6183951 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -11,6 +11,7 @@
/**
* scif_recv_mark: Handle SCIF_MARK request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a mark.
@@ -33,6 +34,7 @@ void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_MARK message.
@@ -56,6 +58,7 @@ void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait: Handle SCIF_WAIT request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested waiting on a fence.
@@ -93,6 +96,7 @@ void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_WAIT message.
@@ -114,6 +118,7 @@ void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a local offset.
@@ -135,6 +140,7 @@ void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_remote: Handle SCIF_SIGNAL_REMOTE request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a remote offset.
@@ -156,6 +162,7 @@ void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a signal request.
@@ -280,12 +287,12 @@ alloc_fail:
return err;
}
-/*
+/**
* scif_prog_signal:
- * @epd - Endpoint Descriptor
- * @offset - registered address to write @val to
- * @val - Value to be written at @offset
- * @type - Type of the window.
+ * @epd: Endpoint Descriptor
+ * @offset: registered address to write @val to
+ * @val: Value to be written at @offset
+ * @type: Type of the window.
*
* Arrange to write a value to the registered offset after ensuring that the
* offset provided is indeed valid.
@@ -501,12 +508,12 @@ retry:
/**
* scif_send_fence_signal:
- * @epd - endpoint descriptor
- * @loff - local offset
- * @lval - local value to write to loffset
- * @roff - remote offset
- * @rval - remote value to write to roffset
- * @flags - flags
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write to loffset
+ * @roff: remote offset
+ * @rval: remote value to write to roffset
+ * @flags: flags
*
* Sends a remote fence signal request
*/
@@ -577,10 +584,11 @@ static void scif_fence_mark_cb(void *arg)
atomic_dec(&ep->rma_info.fence_refcount);
}
-/*
+/**
* _scif_fence_mark:
+ * @epd: endpoint descriptor
+ * @mark: DMA mark to set up
*
- * @epd - endpoint descriptor
* Set up a mark for this endpoint and return the value of the mark.
*/
int _scif_fence_mark(scif_epd_t epd, int *mark)
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
index c537df84539a..c4d9422082b7 100644
--- a/drivers/misc/mic/scif/scif_nm.c
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -14,6 +14,8 @@
/**
* scif_invalidate_ep() - Set state for all connected endpoints
* to disconnected and wake up all send/recv waitqueues
+ *
+ * @node: Node to invalidate
*/
static void scif_invalidate_ep(int node)
{
@@ -99,11 +101,10 @@ void scif_send_acks(struct scif_dev *dev)
}
}
-/*
- * scif_cleanup_scifdev
- *
+/**
+ * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
+ * SCIF device.
* @dev: Remote SCIF device.
- * Uninitialize SCIF data structures for remote SCIF device.
*/
void scif_cleanup_scifdev(struct scif_dev *dev)
{
@@ -136,8 +137,8 @@ void scif_cleanup_scifdev(struct scif_dev *dev)
scif_cleanup_qp(dev);
}
-/*
- * scif_remove_node:
+/**
+ * scif_remove_node
*
* @node: Node to remove
*/
@@ -162,9 +163,9 @@ static int scif_send_rmnode_msg(int node, int remove_node)
}
/**
- * scif_node_disconnect:
+ * scif_node_disconnect
*
- * @node_id[in]: source node id.
+ * @node_id: source node id [in]
* @mgmt_initiated: Disconnection initiated from the mgmt node
*
* Disconnect a node from the scif network.
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index ea084626fe11..e0748be373f1 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -443,6 +443,7 @@ static void scif_deinit_p2p_info(struct scif_dev *scifdev,
/**
* scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
+ * @scifdev: SCIF device
* @dst: Destination node
*
* Connect the src and dst node by setting up the p2p connection
@@ -719,7 +720,7 @@ scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_exit() - Respond to SCIF_EXIT interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
* This function stops the SCIF interface for the node which sent
* the SCIF_EXIT message and starts waiting for that node to
@@ -740,7 +741,7 @@ scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
/**
* scif_exitack() - Respond to SCIF_EXIT_ACK interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
*/
static __always_inline void
@@ -930,6 +931,7 @@ local_error:
/**
* scif_node_add_nack: Respond to SCIF_NODE_ADD_NACK interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* SCIF_NODE_ADD failed, so inform the waiting wq.
@@ -946,8 +948,9 @@ scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
}
}
-/*
+/**
* scif_node_remove: Handle SCIF_NODE_REMOVE message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* Handle node removal.
@@ -962,8 +965,9 @@ scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
scif_handle_remove_node(node);
}
-/*
+/**
* scif_node_remove_ack: Handle SCIF_NODE_REMOVE_ACK message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* The peer has acked a SCIF_NODE_REMOVE message.
@@ -979,6 +983,7 @@ scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_get_node_info: Respond to SCIF_GET_NODE_INFO interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
 * Retrieve node info, i.e. maxid and total, from the mgmt node.
@@ -1058,6 +1063,7 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
scif_recv_sig_resp, /* SCIF_SIG_NACK */
};
+static int scif_max_msg_id = SCIF_MAX_MSG;
/**
* scif_nodeqp_msg_handler() - Common handler for node messages
* @scifdev: Remote device to respond to
@@ -1067,8 +1073,6 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
* This routine calls the appropriate routine to handle a Node Qp
* message receipt
*/
-static int scif_max_msg_id = SCIF_MAX_MSG;
-
static void
scif_nodeqp_msg_handler(struct scif_dev *scifdev,
struct scif_qp *qp, struct scifmsg *msg)
@@ -1117,7 +1121,7 @@ void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
/**
* scif_loopb_wq_handler - Loopback Workqueue Handler.
- * @work: loop back work
+ * @unused: loop back work (unused)
*
* This work queue routine is invoked by the loopback work queue handler.
* It grabs the recv lock, dequeues any available messages from the head
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
index 547a71285069..4bdb5ef9a139 100644
--- a/drivers/misc/mic/scif/scif_ports.c
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -14,11 +14,11 @@
struct idr scif_ports;
-/*
+/**
* struct scif_port - SCIF port information
*
- * @ref_cnt - Reference count since there can be multiple endpoints
- * created via scif_accept(..) simultaneously using a port.
+ * @ref_cnt: Reference count since there can be multiple endpoints
+ * created via scif_accept(..) simultaneously using a port.
*/
struct scif_port {
int ref_cnt;
@@ -27,7 +27,8 @@ struct scif_port {
/**
* __scif_get_port - Reserve a specified port # for SCIF and add it
* to the global list.
- * @port : port # to be reserved.
+ * @start: lowest port # to be reserved (inclusive).
+ * @end: highest port # to be reserved (exclusive).
*
* @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
* On memory allocation failure, returns -ENOMEM.
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 406cd5abfa72..de8f61efaef5 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -458,7 +458,7 @@ static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
/**
* scif_create_remote_window:
- * @ep: end point
+ * @scifdev: SCIF device
* @nr_pages: number of pages in window
*
* Allocate and prepare a remote registration window.
@@ -500,7 +500,6 @@ error_ret:
/**
* scif_destroy_remote_window:
- * @ep: end point
* @window: remote registration window
*
* Deallocate resources for remote window.
@@ -1037,6 +1036,7 @@ void scif_free_window_offset(struct scif_endpt *ep,
/**
* scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is requesting a memory allocation.
@@ -1072,6 +1072,7 @@ error:
/**
* scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side responded to a memory allocation.
@@ -1096,6 +1097,7 @@ void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Free up memory kmalloc'd earlier.
@@ -1134,6 +1136,7 @@ scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
/**
* scif_recv_reg: Respond to SCIF_REGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Update remote window list with a new registered window.
@@ -1170,6 +1173,7 @@ void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remove window from remote registration list;
@@ -1235,6 +1239,7 @@ error:
/**
* scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete registration.
@@ -1253,6 +1258,7 @@ void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that registration
@@ -1272,6 +1278,7 @@ void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete unregistration.
@@ -1290,6 +1297,7 @@ void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that unregistration
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 85942f6717c5..55e7f21e51f4 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -438,7 +438,7 @@ error:
/*
* The config ops structure as defined by virtio config
*/
-static struct virtio_config_ops vop_vq_config_ops = {
+static const struct virtio_config_ops vop_vq_config_ops = {
.get_features = vop_get_features,
.finalize_features = vop_finalize_features,
.get = vop_get,
@@ -546,7 +546,7 @@ static int vop_match_desc(struct device *dev, void *data)
return vdev->desc == (void __iomem *)data;
}
-static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
+static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl __iomem *dc)
{
return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}
@@ -614,7 +614,6 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
struct mic_device_desc __iomem *d;
struct mic_device_ctrl __iomem *dc;
struct device *dev;
- int ret;
for (i = sizeof(struct mic_bootparam);
i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
@@ -644,7 +643,7 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
&dc->config_change);
put_device(dev);
_vop_handle_config_change(d, i, vpdev);
- ret = _vop_remove_device(d, i, vpdev);
+ _vop_remove_device(d, i, vpdev);
if (remove) {
iowrite8(0, &dc->config_change);
iowrite8(0, &dc->guest_ack);
@@ -763,7 +762,7 @@ static void vop_driver_remove(struct vop_device *vpdev)
kfree(vi);
}
-static struct vop_device_id id_table[] = {
+static const struct vop_device_id id_table[] = {
{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 60828af7506a..8d2b7135738e 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -135,6 +135,7 @@ static DEFINE_MUTEX(pch_phub_mutex);
/**
 * pch_phub_read_modify_write_reg() - Reading, modifying and writing a register
+ * @chip: Pointer to the PHUB register structure
* @reg_addr_offset: Register offset address value.
* @data: Writing value.
* @mask: Mask value.
@@ -147,9 +148,8 @@ static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
}
-#ifdef CONFIG_PM
/* pch_phub_save_reg_conf - saves register configuration */
-static void pch_phub_save_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_save_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -210,7 +210,7 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev)
}
/* pch_phub_restore_reg_conf - restore register configuration */
-static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_restore_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -270,10 +270,10 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
-#endif
/**
* pch_phub_read_serial_rom() - Reading Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address to read.
* @data: Read buffer for specified Serial ROM value.
*/
@@ -288,6 +288,7 @@ static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom() - Writing Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address.
* @data: Serial ROM value to write.
*/
@@ -326,6 +327,7 @@ static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_read_serial_rom_val() - Read Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value to read.
*/
@@ -342,6 +344,7 @@ static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom_val() - writing Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value.
*/
@@ -443,7 +446,7 @@ static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip)
/**
* pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Buffer of the Gigabit Ethernet MAC address value.
*/
static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -455,7 +458,7 @@ static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
/**
* pch_phub_write_gbe_mac_addr() - Write MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Gigabit Ethernet MAC address value.
*/
static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -835,48 +838,19 @@ static void pch_phub_remove(struct pci_dev *pdev)
kfree(chip);
}
-#ifdef CONFIG_PM
-
-static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_phub_suspend(struct device *dev_d)
{
- int ret;
-
- pch_phub_save_reg_conf(pdev);
- ret = pci_save_state(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- " %s -pci_save_state returns %d\n", __func__, ret);
- return ret;
- }
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int pch_phub_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_phub_resume(struct device *dev_d)
{
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
- return ret;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pch_phub_restore_reg_conf(pdev);
+ device_wakeup_disable(dev_d);
return 0;
}
-#else
-#define pch_phub_suspend NULL
-#define pch_phub_resume NULL
-#endif /* CONFIG_PM */
static const struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
@@ -888,13 +862,14 @@ static const struct pci_device_id pch_phub_pcidev_id[] = {
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
+static SIMPLE_DEV_PM_OPS(pch_phub_pm_ops, pch_phub_suspend, pch_phub_resume);
+
static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
.id_table = pch_phub_pcidev_id,
.probe = pch_phub_probe,
.remove = pch_phub_remove,
- .suspend = pch_phub_suspend,
- .resume = pch_phub_resume
+ .driver.pm = &pch_phub_pm_ops,
};
module_pci_driver(pch_phub_driver);
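The pch_phub conversion above, and the phantom and tifm_7xx1 conversions further down, all follow the same recipe for moving a PCI driver off the legacy .suspend/.resume hooks: drop the CONFIG_PM #ifdefs, have the callbacks take a struct device *, mark them __maybe_unused so they compile away cleanly when power management is disabled, and wire them up through SIMPLE_DEV_PM_OPS() and .driver.pm. A minimal sketch of the resulting shape for an invented "foo" driver follows; the device ID, register writes and helper names are illustrative placeholders, not anything taken from these patches.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *regs;
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct foo_priv *priv;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->regs = pcim_iomap(pdev, 0, 0);
        if (!priv->regs)
                return -ENOMEM;

        pci_set_drvdata(pdev, priv);
        return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
        /* everything was allocated with devm helpers, nothing to undo here */
}

static int __maybe_unused foo_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        iowrite32(0, priv->regs);       /* placeholder: quiesce the device */
        device_wakeup_disable(dev);
        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        iowrite32(1, priv->regs);       /* placeholder: bring the device back up */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static const struct pci_device_id foo_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1234), },        /* made-up ID, illustration only */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

static struct pci_driver foo_driver = {
        .name = "foo",
        .id_table = foo_pci_tbl,
        .probe = foo_probe,
        .remove = foo_remove,
        .driver.pm = &foo_pm_ops,
};
module_pci_driver(foo_driver);

MODULE_LICENSE("GPL");

Because the callbacks are now reached through dev_pm_ops, the PCI core saves and restores config space and handles the D-state transition around them, which is why the explicit pci_save_state(), pci_set_power_state() and pci_enable_device() calls are simply deleted from these drivers rather than moved. The ID table is also declared const, matching the vop_device_id change at the top of this section.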
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 6a5ed0e25ff1..ce72e46a2e73 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -457,31 +457,26 @@ static void phantom_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int phantom_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused phantom_suspend(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
- synchronize_irq(pdev->irq);
+ synchronize_irq(to_pci_dev(dev_d)->irq);
return 0;
}
-static int phantom_resume(struct pci_dev *pdev)
+static int __maybe_unused phantom_resume(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
return 0;
}
-#else
-#define phantom_suspend NULL
-#define phantom_resume NULL
-#endif
static struct pci_device_id phantom_pci_tbl[] = {
{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
@@ -491,13 +486,14 @@ static struct pci_device_id phantom_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
+static SIMPLE_DEV_PM_OPS(phantom_pm_ops, phantom_suspend, phantom_resume);
+
static struct pci_driver phantom_pci_driver = {
.name = "phantom",
.id_table = phantom_pci_tbl,
.probe = phantom_probe,
.remove = phantom_remove,
- .suspend = phantom_suspend,
- .resume = phantom_resume
+ .driver.pm = &phantom_pm_ops,
};
static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index b7f510676cd6..7236ae527b19 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -496,9 +496,8 @@ static void pti_tty_cleanup(struct tty_struct *tty)
* pti_tty_driver_write()- Write trace debugging data through the char
* interface to the PTI HW. Part of the misc device implementation.
*
- * @filp: Contains private data which is used to obtain
- * master, channel write ID.
- * @data: trace data to be written.
+ * @tty: tty struct containing pti information.
+ * @buf: trace data to be written.
* @len: # of byte to write.
*
* Returns:
@@ -734,8 +733,8 @@ static struct console pti_console = {
* pti_port_activate()- Used to start/initialize any items upon
* first opening of tty_port().
*
- * @port- The tty port number of the PTI device.
- * @tty- The tty struct associated with this device.
+ * @port: The tty port number of the PTI device.
+ * @tty: The tty struct associated with this device.
*
* Returns:
* always returns 0
@@ -755,7 +754,7 @@ static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
* pti_port_shutdown()- Used to stop/shutdown any items upon the
* last tty port close.
*
- * @port- The tty port number of the PTI device.
+ * @port: The tty port number of the PTI device.
*
* Notes: The primary purpose of the PTI tty port 0 is to hook
* the syslog daemon to it; thus this port will be open for a
@@ -781,8 +780,8 @@ static const struct tty_port_operations tty_port_ops = {
* pti_pci_probe()- Used to detect pti on the pci bus and set
* things up in the driver.
*
- * @pdev- pci_dev struct values for pti.
- * @ent- pci_device_id struct for pti driver.
+ * @pdev: pci_dev struct values for pti.
+ * @ent: pci_device_id struct for pti driver.
*
* Returns:
* 0 for success
@@ -899,7 +898,6 @@ static struct pci_driver pti_pci_driver = {
};
/**
- *
* pti_init()- Overall entry/init call to the pti driver.
* It starts the registration process with the kernel.
*
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index b1521112dbbd..723825524ea0 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index f7224f90f413..1d75d5e540bc 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -16,6 +16,7 @@
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
#else
+#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 0197441a1eae..f6e600bfac5d 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -16,6 +16,7 @@
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
+#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 5fd94d836070..61b03fcefb13 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -223,7 +223,7 @@ xpc_disconnect(int ch_number)
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
-int __init
+static int __init
xp_init(void)
{
enum xp_retval ret;
@@ -246,7 +246,7 @@ xp_init(void)
module_init(xp_init);
-void __exit
+static void __exit
xp_exit(void)
{
if (is_uv())
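The sgi-xp change just gives xp_init() and xp_exit() internal linkage. Functions that are only reached through module_init()/module_exit() need no external linkage, and making them static silences the sparse "was not declared, should it be static?" warning and the missing-prototype warnings seen in W=1 builds. A minimal sketch with an invented module name:

#include <linux/module.h>

static int __init foo_init(void)
{
        return 0;       /* nothing to set up in this sketch */
}
module_init(foo_init);

static void __exit foo_exit(void)
{
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");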
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index cb57ac6ab4c3..6cc31789b38d 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -1,7 +1,7 @@
/*
* SRAM protect-exec region helper functions
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 14136d2cc8f9..f4ddd1e67015 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -18,7 +18,8 @@
extern void st_kim_recv(void *, const unsigned char *, long);
void st_int_recv(void *, const unsigned char *, long);
-/* function pointer pointing to either,
+/*
+ * function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
*/
@@ -60,7 +61,8 @@ int st_get_uart_wr_room(struct st_data_s *st_gdata)
return tty->ops->write_room(tty);
}
-/* can be called in from
+/*
+ * can be called in from
* -- KIM (during fw download)
* -- ST Core (during st_write)
*
@@ -100,7 +102,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
kfree_skb(st_gdata->rx_skb);
return;
}
- /* this cannot fail
+ /*
+ * this cannot fail
* this shouldn't take long
* - should be just skb_queue_tail for the
* protocol stack driver
@@ -121,9 +124,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
return;
}
-/**
- * st_reg_complete -
- * to call registration complete callbacks
+/*
+ * st_reg_complete - to call registration complete callbacks
* of all protocol stack drivers
* This function is being called with spin lock held, protocol drivers are
* only expected to complete their waits and do nothing more than that.
@@ -156,21 +158,24 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
pr_debug("len %d room %d", len, room);
if (!len) {
- /* Received packet has only packet header and
+ /*
+ * Received packet has only packet header and
* has zero length payload. So, ask ST CORE to
* forward the packet to protocol driver (BT/FM/GPS)
*/
st_send_frame(chnl_id, st_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(st_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
st_gdata->rx_state = ST_W4_DATA;
@@ -178,8 +183,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return len;
}
- /* Change ST state to continue to process next
- * packet */
+ /* Change ST state to continue to process next packet */
st_gdata->rx_state = ST_W4_PACKET_TYPE;
st_gdata->rx_skb = NULL;
st_gdata->rx_count = 0;
@@ -188,7 +192,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return 0;
}
-/**
+/*
* st_wakeup_ack - internal function for action when wake-up ack
* received
*/
@@ -199,7 +203,8 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned long flags = 0;
spin_lock_irqsave(&st_gdata->lock, flags);
- /* de-Q from waitQ and Q in txQ now that the
+ /*
+ * de-Q from waitQ and Q in txQ now that the
* chip is awake
*/
while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
@@ -213,7 +218,7 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
st_tx_wakeup(st_gdata);
}
-/**
+/*
* st_int_recv - ST's internal receive function.
* Decodes received RAW data and forwards to corresponding
* client drivers (Bluetooth,FM,GPS..etc).
@@ -262,8 +267,10 @@ void st_int_recv(void *disc_data,
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
- /* Ask ST CORE to forward
- * the packet to protocol driver */
+ /*
+ * Ask ST CORE to forward
+ * the packet to protocol driver
+ */
st_send_frame(st_gdata->rx_chnl, st_gdata);
st_gdata->rx_state = ST_W4_PACKET_TYPE;
@@ -276,7 +283,7 @@ void st_int_recv(void *disc_data,
&st_gdata->rx_skb->data
[proto->offset_len_in_hdr];
pr_debug("plen pointing to %x\n", *plen);
- if (proto->len_size == 1)/* 1 byte len field */
+ if (proto->len_size == 1) /* 1 byte len field */
payload_len = *(unsigned char *)plen;
else if (proto->len_size == 2)
payload_len =
@@ -294,18 +301,23 @@ void st_int_recv(void *disc_data,
}
/* end of if rx_count */
- /* Check first byte of packet and identify module
- * owner (BT/FM/GPS) */
+
+ /*
+ * Check first byte of packet and identify module
+ * owner (BT/FM/GPS)
+ */
switch (*ptr) {
case LL_SLEEP_IND:
case LL_SLEEP_ACK:
case LL_WAKE_UP_IND:
pr_debug("PM packet");
- /* this takes appropriate action based on
+ /*
+ * this takes appropriate action based on
* sleep state received --
*/
st_ll_sleep_state(st_gdata, *ptr);
- /* if WAKEUP_IND collides copy from waitq to txq
+ /*
+ * if WAKEUP_IND collides copy from waitq to txq
* and assume chip awake
*/
spin_unlock_irqrestore(&st_gdata->lock, flags);
@@ -331,7 +343,8 @@ void st_int_recv(void *disc_data,
default:
type = *ptr;
- /* Default case means non-HCILL packets,
+ /*
+ * Default case means non-HCILL packets,
* possibilities are packets for:
* (a) valid protocol - Supported Protocols within
* the ST_MAX_CHANNELS.
@@ -377,7 +390,7 @@ done:
return;
}
-/**
+/*
* st_int_dequeue - internal de-Q function.
* If the previous data set was not written
* completely, return that skb which has the pending data.
@@ -396,7 +409,7 @@ static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
return skb_dequeue(&st_gdata->txq);
}
-/**
+/*
* st_int_enqueue - internal Q-ing function.
* Will either Q the skb to txq or the tx_waitq
* depending on the ST LL state.
@@ -561,7 +574,8 @@ long st_register(struct st_proto_s *new_proto)
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* this may take a while to complete
+ /*
+ * this may take a while to complete
* since it involves BT fw download
*/
err = st_kim_start(st_gdata->kim_data);
@@ -583,7 +597,8 @@ long st_register(struct st_proto_s *new_proto)
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
- /* this is where all pending registration
+ /*
+ * this is where all pending registration
* are signalled to be complete by calling callback functions
*/
if ((st_gdata->protos_registered != ST_EMPTY) &&
@@ -593,7 +608,8 @@ long st_register(struct st_proto_s *new_proto)
}
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
- /* check for already registered once more,
+ /*
+ * check for already registered once more,
* since the above check is old
*/
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
@@ -622,7 +638,8 @@ long st_register(struct st_proto_s *new_proto)
}
EXPORT_SYMBOL_GPL(st_register);
-/* to unregister a protocol -
+/*
+ * to unregister a protocol -
* to be called from protocol stack driver
*/
long st_unregister(struct st_proto_s *proto)
@@ -742,7 +759,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_info("%s ", __func__);
- /* TODO:
+ /*
+ * TODO:
* if a protocol has been registered & line discipline
* un-installed for some reason - what should be done ?
*/
@@ -795,7 +813,8 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
pr_debug("done %s", __func__);
}
-/* wake-up function called in from the TTY layer
+/*
+ * wake-up function called in from the TTY layer
* inside the internal wakeup function will be called
*/
static void st_tty_wakeup(struct tty_struct *tty)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index a36ed1ff5967..f2f6cab97c08 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -30,7 +30,7 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
-/**
+/*
* st_get_plat_device -
* function which returns the reference to the platform device
* requested by id. As of now only 1 such device exists (id=0)
@@ -43,7 +43,7 @@ static struct platform_device *st_get_plat_device(int id)
return st_kim_devices[id];
}
-/**
+/*
* validate_firmware_response -
* function to return whether the firmware response was proper
* in case of error don't complete so that waiting for proper
@@ -55,7 +55,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
if (!skb)
return;
- /* these magic numbers are the position in the response buffer which
+ /*
+ * these magic numbers are the position in the response buffer which
* allows us to distinguish whether the response is for the read
* version info. command
*/
@@ -79,7 +80,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
kfree_skb(skb);
}
-/* check for data len received inside kim_int_recv
+/*
+ * check for data len received inside kim_int_recv
* most often hit the last case to update state to waiting for data
*/
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
@@ -91,14 +93,16 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
if (!len) {
validate_firmware_response(kim_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(kim_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
kim_gdata->rx_state = ST_W4_DATA;
@@ -106,8 +110,10 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return len;
}
- /* Change ST LL state to continue to process next
- * packet */
+ /*
+ * Change ST LL state to continue to process next
+ * packet
+ */
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
@@ -115,7 +121,7 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return 0;
}
-/**
+/*
* kim_int_recv - receive function called during firmware download
* firmware download responses on different UART drivers
* have been observed to come in bursts of different
@@ -216,7 +222,8 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
- /* the positions 12 & 13 in the response buffer provide with the
+ /*
+ * the positions 12 & 13 in the response buffer provide with the
* chip, major & minor numbers
*/
@@ -263,7 +270,7 @@ static void skip_change_remote_baud(unsigned char **ptr, long *len)
}
}
-/**
+/*
* download_firmware -
* internal function which parses through the .bts firmware
* script file intreprets SEND, DELAY actions only as of now
@@ -295,7 +302,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
}
ptr = (void *)kim_gdata->fw_entry->data;
len = kim_gdata->fw_entry->size;
- /* bts_header to remove out magic number and
+ /*
+ * bts_header to remove out magic number and
* version
*/
ptr += sizeof(struct bts_header);
@@ -313,8 +321,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
if (unlikely
(((struct hci_command *)action_ptr)->opcode ==
0xFF36)) {
- /* ignore remote change
- * baud rate HCI VS command */
+ /*
+ * ignore remote change
+ * baud rate HCI VS command
+ */
pr_warn("change remote baud"
" rate command in firmware");
skip_change_remote_baud(&ptr, &len);
@@ -346,7 +356,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
- /* reinit completion before sending for the
+ /*
+ * reinit completion before sending for the
* relevant wait
*/
reinit_completion(&kim_gdata->kim_rcvd);
@@ -418,14 +429,16 @@ void st_kim_recv(void *disc_data, const unsigned char *data, long count)
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
struct kim_data_s *kim_gdata = st_gdata->kim_data;
- /* proceed to gather all data and distinguish read fw version response
+ /*
+ * proceed to gather all data and distinguish read fw version response
* from other fw responses when data gathering is complete
*/
kim_int_recv(kim_gdata, data, count);
return;
}
-/* to signal completion of line discipline installation
+/*
+ * to signal completion of line discipline installation
* called from ST Core, upon tty_open
*/
void st_kim_complete(void *kim_data)
@@ -434,7 +447,7 @@ void st_kim_complete(void *kim_data)
complete(&kim_gdata->ldisc_installed);
}
-/**
+/*
* st_kim_start - called from ST Core upon 1st registration
* This involves toggling the chip enable gpio, reading
* the firmware version from chip, forming the fw file name
@@ -472,8 +485,10 @@ long st_kim_start(void *kim_data)
err = wait_for_completion_interruptible_timeout(
&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME));
if (!err) {
- /* ldisc installation timeout,
- * flush uart, power cycle BT_EN */
+ /*
+ * ldisc installation timeout,
+ * flush uart, power cycle BT_EN
+ */
pr_err("ldisc installation timeout");
err = st_kim_stop(kim_gdata);
continue;
@@ -482,8 +497,10 @@ long st_kim_start(void *kim_data)
pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
- /* ldisc installed but fw download failed,
- * flush uart & power cycle BT_EN */
+ /*
+ * ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN
+ */
pr_err("download firmware failed");
err = st_kim_stop(kim_gdata);
continue;
@@ -495,7 +512,7 @@ long st_kim_start(void *kim_data)
return err;
}
-/**
+/*
* st_kim_stop - stop communication with chip.
* This can be called from ST Core/KIM, on the-
* (a) last un-register when chip need not be powered there-after,
@@ -650,7 +667,7 @@ static const struct attribute_group uim_attr_grp = {
.attrs = uim_attrs,
};
-/**
+/*
* st_kim_ref - reference the core's data
* This references the per-ST platform device in the arch/xx/
* board-xx.c file.
@@ -729,8 +746,7 @@ static int kim_probe(struct platform_device *pdev)
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
goto err_sysfs_group;
}
- /* get reference of pdev for request_firmware
- */
+ /* get reference of pdev for request_firmware */
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
@@ -772,7 +788,8 @@ static int kim_remove(struct platform_device *pdev)
kim_gdata = platform_get_drvdata(pdev);
- /* Free the Bluetooth/FM/GPIO
+ /*
+ * Free the Bluetooth/FM/GPIO
* nShutdown gpio from the system
*/
gpio_free(pdata->nshutdown_gpio);
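Most of the ti-st churn above is comment cleanup rather than functional change: comments that are not kernel-doc lose their "/**" opener, presumably so the kernel-doc tooling stops trying to parse them, and multi-line comments move to the generally preferred layout from Documentation/process/coding-style.rst, with the opening "/*" alone on its line and each continuation aligned on " *". A small before/after sketch, with invented comment text:

/* before: text starts on the opening line and the closing marker is
 * tacked onto the last line of text */

/*
 * after: the opener stands alone, every continuation line is aligned
 * on " *", and the terminator gets its own line.
 */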
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index e6b40aa8fb42..228f2eb1d476 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -207,10 +207,9 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
spin_unlock_irqrestore(&fm->lock, flags);
}
-#ifdef CONFIG_PM
-
-static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
@@ -221,15 +220,13 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
}
- pci_save_state(dev);
- pci_enable_wake(dev, pci_choose_state(dev, state), 0);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int tifm_7xx1_resume(struct pci_dev *dev)
+static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
unsigned long timeout;
@@ -242,11 +239,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
return -ENXIO;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
pci_set_master(dev);
dev_dbg(&dev->dev, "resuming host\n");
@@ -297,13 +289,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
return 0;
}
-#else
-
-#define tifm_7xx1_suspend NULL
-#define tifm_7xx1_resume NULL
-
-#endif /* CONFIG_PM */
-
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
@@ -424,13 +409,14 @@ static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume);
+
static struct pci_driver tifm_7xx1_driver = {
.name = DRIVER_NAME,
.id_table = tifm_7xx1_pci_tbl,
.probe = tifm_7xx1_probe,
.remove = tifm_7xx1_remove,
- .suspend = tifm_7xx1_suspend,
- .resume = tifm_7xx1_resume,
+ .driver.pm = &tifm_7xx1_pm_ops,
};
module_pci_driver(tifm_7xx1_driver);
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 107028e77ca3..aa91f69a5fa9 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -179,14 +179,6 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
return 0;
}
-static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
-{
- if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
- return VM_FAULT_SIGBUS;
-
- return 0;
-}
-
static void uacce_vma_close(struct vm_area_struct *vma)
{
struct uacce_queue *q = vma->vm_private_data;
@@ -199,7 +191,6 @@ static void uacce_vma_close(struct vm_area_struct *vma)
}
static const struct vm_operations_struct uacce_vm_ops = {
- .fault = uacce_vma_fault,
.close = uacce_vma_close,
};