author     Amit Cohen <amcohen@nvidia.com>     2024-04-02 15:54:20 +0200
committer  Jakub Kicinski <kuba@kernel.org>    2024-04-03 19:50:40 -0700
commit     d4b3930b19f7339c3fb98fc3ed71388b0180e7a7 (patch)
tree       3b028f4ba59163bcee60e2aace399b99d67264ef
parent     29ad2a990648c5e6ec93101cb3719dc820d363e9 (diff)
mlxsw: pci: Poll command interface for each cmd_exec()
The command interface is used to configure and query the FW when EMADs are not available. While the driver sets up the asynchronous queues, it polls the command interface for completions. After that, there is a short period when the asynchronous queues work but EMADs are not yet available (marked in the code as nopoll = true). During this period, commands are sent via the command interface without polling, since an interrupt can be received for the completion.

Command interface completions are received from the HW in EQ0 (event queue 0). EQ0 is used instead of polling only four times during initialization and once during tear down, yet it adds overhead for the entire lifetime of the driver: for each interrupt, we have to check whether events arrived in EQ0 or EQ1 and handle them. This is inefficient, especially because EQ0 is used only as part of driver init/fini.

Instead, poll the command interface for each call of cmd_exec(). That is, when a command is sent via the command interface (because EMADs are not available), poll for its completion regardless of whether the asynchronous queues are available. This will later allow configuring only EQ1 and will simplify the flow.

Remove the 'nopoll' indication and change mlxsw_pci_cmd_exec() to poll until an answer arrives or a timeout expires, regardless of the queues' state. For now, completions are still handled by EQ0 as well, but this will be removed in the next patch. Additional cleanups will follow in later patches.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/e674c70380ceda953e0e45a77334c5d22e69938f.1712062203.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 48
1 file changed, 17 insertions(+), 31 deletions(-)
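The pattern the patch converges on is a plain busy-wait on the command interface: read the control register in a loop until the hardware clears the GO bit (at which point the status is taken from the upper bits) or a deadline passes. The sketch below models that flow as self-contained userspace C; it is only an illustration, and the FAKE_* register bits, the fake_read32()/fake_cmd_wait() helpers and the 1-second timeout are invented for the example. The real driver reads CIR_CTRL with mlxsw_pci_read32(), tracks the deadline with jiffies/time_before() and yields with cond_resched() between reads, as the diff below shows.

/*
 * Illustrative sketch of "poll until the GO bit clears or a timeout
 * expires".  This is NOT the mlxsw driver code: the simulated register,
 * the helper names and the timeout value are made up for the example.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define FAKE_CIR_CTRL_GO_BIT		(1U << 23)	/* "command in progress" */
#define FAKE_CIR_CTRL_STATUS_SHIFT	24
#define FAKE_CMD_TIMEOUT_MS		1000

/* Stand-in for a device control register (real code would use readl()). */
static _Atomic uint32_t fake_cir_ctrl = FAKE_CIR_CTRL_GO_BIT;

static uint32_t fake_read32(void)
{
	return atomic_load(&fake_cir_ctrl);
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll the control register until the GO bit clears or the deadline hits. */
static int fake_cmd_wait(uint8_t *p_status)
{
	uint64_t end = now_ms() + FAKE_CMD_TIMEOUT_MS;

	do {
		uint32_t ctrl = fake_read32();

		if (!(ctrl & FAKE_CIR_CTRL_GO_BIT)) {
			*p_status = ctrl >> FAKE_CIR_CTRL_STATUS_SHIFT;
			return 0;		/* command completed */
		}
		/* The real code yields here with cond_resched(). */
	} while (now_ms() < end);

	return -1;				/* timed out */
}

int main(void)
{
	uint8_t status;

	/* Pretend the device finished: GO bit cleared, status 0 == OK. */
	atomic_store(&fake_cir_ctrl, 0);

	if (fake_cmd_wait(&status))
		printf("command timed out\n");
	else
		printf("command done, status %u\n", status);
	return 0;
}

The trade-off is the one the commit message spells out: the command interface is used only a handful of times around init/fini, so a bounded poll per cmd_exec() is cheaper overall than keeping EQ0 wired up just to deliver those few completions as interrupts.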
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5688c14f7426..2ee397922936 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -111,7 +111,6 @@ struct mlxsw_pci {
struct mlxsw_pci_mem_item out_mbox;
struct mlxsw_pci_mem_item in_mbox;
struct mutex lock; /* Lock access to command registers */
- bool nopoll;
wait_queue_head_t wait;
bool wait_done;
struct {
@@ -1105,8 +1104,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
goto err_rdqs_init;
}
- /* We have to poll in command interface until queues are initialized */
- mlxsw_pci->cmd.nopoll = true;
return 0;
err_rdqs_init:
@@ -1120,7 +1117,6 @@ err_cqs_init:
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
- mlxsw_pci->cmd.nopoll = false;
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
@@ -1848,9 +1844,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
- bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ unsigned long end;
int err;
*p_status = MLXSW_CMD_STATUS_OK;
@@ -1879,28 +1875,20 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
wmb(); /* all needs to be written before we write control register */
mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
MLXSW_PCI_CIR_CTRL_GO_BIT |
- (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
(opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
opcode);
- if (!evreq) {
- unsigned long end;
-
- end = jiffies + timeout;
- do {
- u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
- if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
- *p_wait_done = true;
- *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
- break;
- }
- cond_resched();
- } while (time_before(jiffies, end));
- } else {
- wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
- *p_status = mlxsw_pci->cmd.comp.status;
- }
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
err = 0;
if (*p_wait_done) {
@@ -1917,14 +1905,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*/
__be32 tmp;
- if (!evreq) {
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_HI));
- memcpy(out_mbox, &tmp, sizeof(tmp));
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_LO));
- memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
- }
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
} else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
}